/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_formats.h"
#include "r600_shader.h"
#include "evergreend.h"

#include "pipe/p_shader_tokens.h"
#include "util/u_pack_color.h"
#include "util/u_memory.h"
#include "util/u_framebuffer.h"
#include "util/u_dual_blend.h"
#include "evergreen_compute.h"
#include "util/u_math.h"

static inline unsigned evergreen_array_mode(unsigned mode)
{
	switch (mode) {
	default:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
		return V_028C70_ARRAY_LINEAR_ALIGNED;
	case RADEON_SURF_MODE_1D:
		return V_028C70_ARRAY_1D_TILED_THIN1;
	case RADEON_SURF_MODE_2D:
		return V_028C70_ARRAY_2D_TILED_THIN1;
	}
}

static uint32_t eg_num_banks(uint32_t nbanks)
{
	switch (nbanks) {
	case 2:
		return 0;
	case 4:
		return 1;
	case 8:
	default:
		return 2;
	case 16:
		return 3;
	}
}

static unsigned eg_tile_split(unsigned tile_split)
{
	switch (tile_split) {
	case 64: return 0;
	case 128: return 1;
	case 256: return 2;
	case 512: return 3;
	default:
	case 1024: return 4;
	case 2048: return 5;
	case 4096: return 6;
	}
}

static unsigned eg_macro_tile_aspect(unsigned macro_tile_aspect)
{
	switch (macro_tile_aspect) {
	default:
	case 1: return 0;
	case 2: return 1;
	case 4: return 2;
	case 8: return 3;
	}
}

static unsigned eg_bank_wh(unsigned bankwh)
{
	switch (bankwh) {
	default:
	case 1: return 0;
	case 2: return 1;
	case 4: return 2;
	case 8: return 3;
	}
}
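/* The eg_*() helpers above turn raw surface parameters into the log2-style
 * register field encodings (tile split is log2(bytes / 64), bank
 * width/height/aspect are log2 of the value). The r600_translate_*()
 * helpers below map gallium blend enums onto CB_BLENDn_CONTROL field
 * values and assert on anything unknown. */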
static uint32_t r600_translate_blend_function(int blend_func)
{
	switch (blend_func) {
	case PIPE_BLEND_ADD:
		return V_028780_COMB_DST_PLUS_SRC;
	case PIPE_BLEND_SUBTRACT:
		return V_028780_COMB_SRC_MINUS_DST;
	case PIPE_BLEND_REVERSE_SUBTRACT:
		return V_028780_COMB_DST_MINUS_SRC;
	case PIPE_BLEND_MIN:
		return V_028780_COMB_MIN_DST_SRC;
	case PIPE_BLEND_MAX:
		return V_028780_COMB_MAX_DST_SRC;
	default:
		R600_ERR("Unknown blend function %d\n", blend_func);
		assert(0);
		break;
	}
	return 0;
}

static uint32_t r600_translate_blend_factor(int blend_fact)
{
	switch (blend_fact) {
	case PIPE_BLENDFACTOR_ONE:
		return V_028780_BLEND_ONE;
	case PIPE_BLENDFACTOR_SRC_COLOR:
		return V_028780_BLEND_SRC_COLOR;
	case PIPE_BLENDFACTOR_SRC_ALPHA:
		return V_028780_BLEND_SRC_ALPHA;
	case PIPE_BLENDFACTOR_DST_ALPHA:
		return V_028780_BLEND_DST_ALPHA;
	case PIPE_BLENDFACTOR_DST_COLOR:
		return V_028780_BLEND_DST_COLOR;
	case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
		return V_028780_BLEND_SRC_ALPHA_SATURATE;
	case PIPE_BLENDFACTOR_CONST_COLOR:
		return V_028780_BLEND_CONST_COLOR;
	case PIPE_BLENDFACTOR_CONST_ALPHA:
		return V_028780_BLEND_CONST_ALPHA;
	case PIPE_BLENDFACTOR_ZERO:
		return V_028780_BLEND_ZERO;
	case PIPE_BLENDFACTOR_INV_SRC_COLOR:
		return V_028780_BLEND_ONE_MINUS_SRC_COLOR;
	case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
		return V_028780_BLEND_ONE_MINUS_SRC_ALPHA;
	case PIPE_BLENDFACTOR_INV_DST_ALPHA:
		return V_028780_BLEND_ONE_MINUS_DST_ALPHA;
	case PIPE_BLENDFACTOR_INV_DST_COLOR:
		return V_028780_BLEND_ONE_MINUS_DST_COLOR;
	case PIPE_BLENDFACTOR_INV_CONST_COLOR:
		return V_028780_BLEND_ONE_MINUS_CONST_COLOR;
	case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
		return V_028780_BLEND_ONE_MINUS_CONST_ALPHA;
	case PIPE_BLENDFACTOR_SRC1_COLOR:
		return V_028780_BLEND_SRC1_COLOR;
	case PIPE_BLENDFACTOR_SRC1_ALPHA:
		return V_028780_BLEND_SRC1_ALPHA;
	case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
		return V_028780_BLEND_INV_SRC1_COLOR;
	case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
		return V_028780_BLEND_INV_SRC1_ALPHA;
	default:
		R600_ERR("Bad blend factor %d not supported!\n", blend_fact);
		assert(0);
		break;
	}
	return 0;
}
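/* Select the SQ_TEX_DIM for a sampler view. The view target takes
 * precedence for cube views; cube resources sampled through a non-cube
 * view are exposed as 2D arrays, and MSAA picks the *_MSAA variants. */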
static unsigned r600_tex_dim(struct r600_texture *rtex,
			     unsigned view_target, unsigned nr_samples)
{
	unsigned res_target = rtex->resource.b.b.target;

	if (view_target == PIPE_TEXTURE_CUBE ||
	    view_target == PIPE_TEXTURE_CUBE_ARRAY)
		res_target = view_target;
	/* If interpreting cubemaps as something else, set 2D_ARRAY. */
	else if (res_target == PIPE_TEXTURE_CUBE ||
		 res_target == PIPE_TEXTURE_CUBE_ARRAY)
		res_target = PIPE_TEXTURE_2D_ARRAY;

	switch (res_target) {
	default:
	case PIPE_TEXTURE_1D:
		return V_030000_SQ_TEX_DIM_1D;
	case PIPE_TEXTURE_1D_ARRAY:
		return V_030000_SQ_TEX_DIM_1D_ARRAY;
	case PIPE_TEXTURE_2D:
	case PIPE_TEXTURE_RECT:
		return nr_samples > 1 ? V_030000_SQ_TEX_DIM_2D_MSAA :
					V_030000_SQ_TEX_DIM_2D;
	case PIPE_TEXTURE_2D_ARRAY:
		return nr_samples > 1 ? V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA :
					V_030000_SQ_TEX_DIM_2D_ARRAY;
	case PIPE_TEXTURE_3D:
		return V_030000_SQ_TEX_DIM_3D;
	case PIPE_TEXTURE_CUBE:
	case PIPE_TEXTURE_CUBE_ARRAY:
		return V_030000_SQ_TEX_DIM_CUBEMAP;
	}
}

static uint32_t r600_translate_dbformat(enum pipe_format format)
{
	switch (format) {
	case PIPE_FORMAT_Z16_UNORM:
		return V_028040_Z_16;
	case PIPE_FORMAT_Z24X8_UNORM:
	case PIPE_FORMAT_Z24_UNORM_S8_UINT:
	case PIPE_FORMAT_X8Z24_UNORM:
	case PIPE_FORMAT_S8_UINT_Z24_UNORM:
		return V_028040_Z_24;
	case PIPE_FORMAT_Z32_FLOAT:
	case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
		return V_028040_Z_32_FLOAT;
	default:
		return ~0U;
	}
}

static bool r600_is_sampler_format_supported(struct pipe_screen *screen, enum pipe_format format)
{
	return r600_translate_texformat(screen, format, NULL, NULL, NULL,
					FALSE) != ~0U;
}

static bool r600_is_colorbuffer_format_supported(enum chip_class chip, enum pipe_format format)
{
	return r600_translate_colorformat(chip, format, FALSE) != ~0U &&
	       r600_translate_colorswap(format, FALSE) != ~0U;
}

static bool r600_is_zs_format_supported(enum pipe_format format)
{
	return r600_translate_dbformat(format) != ~0U;
}

boolean evergreen_is_format_supported(struct pipe_screen *screen,
				      enum pipe_format format,
				      enum pipe_texture_target target,
				      unsigned sample_count,
				      unsigned usage)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	unsigned retval = 0;

	if (target >= PIPE_MAX_TEXTURE_TYPES) {
		R600_ERR("r600: unsupported texture type %d\n", target);
		return FALSE;
	}

	if (!util_format_is_supported(format, usage))
		return FALSE;

	if (sample_count > 1) {
		if (!rscreen->has_msaa)
			return FALSE;

		switch (sample_count) {
		case 2:
		case 4:
		case 8:
			break;
		default:
			return FALSE;
		}
	}

	if (usage & PIPE_BIND_SAMPLER_VIEW) {
		if (target == PIPE_BUFFER) {
			if (r600_is_vertex_format_supported(format))
				retval |= PIPE_BIND_SAMPLER_VIEW;
		} else {
			if (r600_is_sampler_format_supported(screen, format))
				retval |= PIPE_BIND_SAMPLER_VIEW;
		}
	}

	if ((usage & (PIPE_BIND_RENDER_TARGET |
		      PIPE_BIND_DISPLAY_TARGET |
		      PIPE_BIND_SCANOUT |
		      PIPE_BIND_SHARED |
		      PIPE_BIND_BLENDABLE)) &&
	    r600_is_colorbuffer_format_supported(rscreen->b.chip_class, format)) {
		retval |= usage &
			  (PIPE_BIND_RENDER_TARGET |
			   PIPE_BIND_DISPLAY_TARGET |
			   PIPE_BIND_SCANOUT |
			   PIPE_BIND_SHARED);
		if (!util_format_is_pure_integer(format) &&
		    !util_format_is_depth_or_stencil(format))
			retval |= usage & PIPE_BIND_BLENDABLE;
	}

	if ((usage & PIPE_BIND_DEPTH_STENCIL) &&
	    r600_is_zs_format_supported(format)) {
		retval |= PIPE_BIND_DEPTH_STENCIL;
	}

	if ((usage & PIPE_BIND_VERTEX_BUFFER) &&
	    r600_is_vertex_format_supported(format)) {
		retval |= PIPE_BIND_VERTEX_BUFFER;
	}

	if ((usage & PIPE_BIND_LINEAR) &&
	    !util_format_is_compressed(format) &&
	    !(usage & PIPE_BIND_DEPTH_STENCIL))
		retval |= PIPE_BIND_LINEAR;

	return retval == usage;
}
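/* Build the blend CSO. Two command buffers are filled: "buffer" with the
 * real CB_BLENDi_CONTROL values and "buffer_no_blend" with blending forced
 * off, so draws can cheaply switch between the two. */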
static void *evergreen_create_blend_state_mode(struct pipe_context *ctx,
					       const struct pipe_blend_state *state, int mode)
{
	uint32_t color_control = 0, target_mask = 0;
	struct r600_blend_state *blend = CALLOC_STRUCT(r600_blend_state);

	if (!blend) {
		return NULL;
	}

	r600_init_command_buffer(&blend->buffer, 20);
	r600_init_command_buffer(&blend->buffer_no_blend, 20);

	if (state->logicop_enable) {
		color_control |= (state->logicop_func << 16) | (state->logicop_func << 20);
	} else {
		color_control |= (0xcc << 16);
	}
	/* we pretend 8 buffers are used, CB_SHADER_MASK will disable the unused ones */
	if (state->independent_blend_enable) {
		for (int i = 0; i < 8; i++) {
			target_mask |= (state->rt[i].colormask << (4 * i));
		}
	} else {
		for (int i = 0; i < 8; i++) {
			target_mask |= (state->rt[0].colormask << (4 * i));
		}
	}

	/* only have dual source on MRT0 */
	blend->dual_src_blend = util_blend_state_is_dual(state, 0);
	blend->cb_target_mask = target_mask;
	blend->alpha_to_one = state->alpha_to_one;

	if (target_mask)
		color_control |= S_028808_MODE(mode);
	else
		color_control |= S_028808_MODE(V_028808_CB_DISABLE);

	r600_store_context_reg(&blend->buffer, R_028808_CB_COLOR_CONTROL, color_control);
	r600_store_context_reg(&blend->buffer, R_028B70_DB_ALPHA_TO_MASK,
			       S_028B70_ALPHA_TO_MASK_ENABLE(state->alpha_to_coverage) |
			       S_028B70_ALPHA_TO_MASK_OFFSET0(2) |
			       S_028B70_ALPHA_TO_MASK_OFFSET1(2) |
			       S_028B70_ALPHA_TO_MASK_OFFSET2(2) |
			       S_028B70_ALPHA_TO_MASK_OFFSET3(2));
	r600_store_context_reg_seq(&blend->buffer, R_028780_CB_BLEND0_CONTROL, 8);

	/* Copy over the dwords set so far into buffer_no_blend.
	 * Only the CB_BLENDi_CONTROL registers must be set after this. */
	memcpy(blend->buffer_no_blend.buf, blend->buffer.buf, blend->buffer.num_dw * 4);
	blend->buffer_no_blend.num_dw = blend->buffer.num_dw;
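	/* One CB_BLENDi_CONTROL dword per MRT: buffer_no_blend always gets 0
	 * (blending disabled), buffer gets the translated blend state. */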
	for (int i = 0; i < 8; i++) {
		/* state->rt entries > 0 only written if independent blending */
		const int j = state->independent_blend_enable ? i : 0;

		unsigned eqRGB = state->rt[j].rgb_func;
		unsigned srcRGB = state->rt[j].rgb_src_factor;
		unsigned dstRGB = state->rt[j].rgb_dst_factor;
		unsigned eqA = state->rt[j].alpha_func;
		unsigned srcA = state->rt[j].alpha_src_factor;
		unsigned dstA = state->rt[j].alpha_dst_factor;
		uint32_t bc = 0;

		r600_store_value(&blend->buffer_no_blend, 0);

		if (!state->rt[j].blend_enable) {
			r600_store_value(&blend->buffer, 0);
			continue;
		}

		bc |= S_028780_BLEND_CONTROL_ENABLE(1);
		bc |= S_028780_COLOR_COMB_FCN(r600_translate_blend_function(eqRGB));
		bc |= S_028780_COLOR_SRCBLEND(r600_translate_blend_factor(srcRGB));
		bc |= S_028780_COLOR_DESTBLEND(r600_translate_blend_factor(dstRGB));

		if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
			bc |= S_028780_SEPARATE_ALPHA_BLEND(1);
			bc |= S_028780_ALPHA_COMB_FCN(r600_translate_blend_function(eqA));
			bc |= S_028780_ALPHA_SRCBLEND(r600_translate_blend_factor(srcA));
			bc |= S_028780_ALPHA_DESTBLEND(r600_translate_blend_factor(dstA));
		}
		r600_store_value(&blend->buffer, bc);
	}
	return blend;
}

static void *evergreen_create_blend_state(struct pipe_context *ctx,
					  const struct pipe_blend_state *state)
{
	return evergreen_create_blend_state_mode(ctx, state, V_028808_CB_NORMAL);
}
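/* Build the depth/stencil/alpha CSO. The gallium depth and stencil compare
 * funcs map 1:1 onto the hardware encodings, so only the stencil ops go
 * through a translation helper. */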
static void *evergreen_create_dsa_state(struct pipe_context *ctx,
					const struct pipe_depth_stencil_alpha_state *state)
{
	unsigned db_depth_control, alpha_test_control, alpha_ref;
	struct r600_dsa_state *dsa = CALLOC_STRUCT(r600_dsa_state);

	if (!dsa) {
		return NULL;
	}

	r600_init_command_buffer(&dsa->buffer, 3);

	dsa->valuemask[0] = state->stencil[0].valuemask;
	dsa->valuemask[1] = state->stencil[1].valuemask;
	dsa->writemask[0] = state->stencil[0].writemask;
	dsa->writemask[1] = state->stencil[1].writemask;
	dsa->zwritemask = state->depth.writemask;

	db_depth_control = S_028800_Z_ENABLE(state->depth.enabled) |
			   S_028800_Z_WRITE_ENABLE(state->depth.writemask) |
			   S_028800_ZFUNC(state->depth.func);

	/* stencil */
	if (state->stencil[0].enabled) {
		db_depth_control |= S_028800_STENCIL_ENABLE(1);
		db_depth_control |= S_028800_STENCILFUNC(state->stencil[0].func); /* translates straight */
		db_depth_control |= S_028800_STENCILFAIL(r600_translate_stencil_op(state->stencil[0].fail_op));
		db_depth_control |= S_028800_STENCILZPASS(r600_translate_stencil_op(state->stencil[0].zpass_op));
		db_depth_control |= S_028800_STENCILZFAIL(r600_translate_stencil_op(state->stencil[0].zfail_op));

		if (state->stencil[1].enabled) {
			db_depth_control |= S_028800_BACKFACE_ENABLE(1);
			db_depth_control |= S_028800_STENCILFUNC_BF(state->stencil[1].func); /* translates straight */
			db_depth_control |= S_028800_STENCILFAIL_BF(r600_translate_stencil_op(state->stencil[1].fail_op));
			db_depth_control |= S_028800_STENCILZPASS_BF(r600_translate_stencil_op(state->stencil[1].zpass_op));
			db_depth_control |= S_028800_STENCILZFAIL_BF(r600_translate_stencil_op(state->stencil[1].zfail_op));
		}
	}

	/* alpha */
	alpha_test_control = 0;
	alpha_ref = 0;
	if (state->alpha.enabled) {
		alpha_test_control = S_028410_ALPHA_FUNC(state->alpha.func);
		alpha_test_control |= S_028410_ALPHA_TEST_ENABLE(1);
		alpha_ref = fui(state->alpha.ref_value);
	}
	dsa->sx_alpha_test_control = alpha_test_control & 0xff;
	dsa->alpha_ref = alpha_ref;

	/* misc */
	r600_store_context_reg(&dsa->buffer, R_028800_DB_DEPTH_CONTROL, db_depth_control);
	return dsa;
}

static void *evergreen_create_rs_state(struct pipe_context *ctx,
				       const struct pipe_rasterizer_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	unsigned tmp, spi_interp;
	float psize_min, psize_max;
	struct r600_rasterizer_state *rs = CALLOC_STRUCT(r600_rasterizer_state);

	if (!rs) {
		return NULL;
	}

	r600_init_command_buffer(&rs->buffer, 30);

	rs->scissor_enable = state->scissor;
	rs->clip_halfz = state->clip_halfz;
	rs->flatshade = state->flatshade;
	rs->sprite_coord_enable = state->sprite_coord_enable;
	rs->rasterizer_discard = state->rasterizer_discard;
	rs->two_side = state->light_twoside;
	rs->clip_plane_enable = state->clip_plane_enable;
	rs->pa_sc_line_stipple = state->line_stipple_enable ?
				 S_028A0C_LINE_PATTERN(state->line_stipple_pattern) |
				 S_028A0C_REPEAT_COUNT(state->line_stipple_factor) : 0;
	rs->pa_cl_clip_cntl =
		S_028810_DX_CLIP_SPACE_DEF(state->clip_halfz) |
		S_028810_ZCLIP_NEAR_DISABLE(!state->depth_clip) |
		S_028810_ZCLIP_FAR_DISABLE(!state->depth_clip) |
		S_028810_DX_LINEAR_ATTR_CLIP_ENA(1) |
		S_028810_DX_RASTERIZATION_KILL(state->rasterizer_discard);
	rs->multisample_enable = state->multisample;
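	/* Note: state->offset_scale is pre-multiplied by 16 below; the
	 * PA_SU_POLY_OFFSET_*_SCALE registers appear to work in units of
	 * 1/16th of the depth slope. */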
	/* offset */
	rs->offset_units = state->offset_units;
	rs->offset_scale = state->offset_scale * 16.0f;
	rs->offset_enable = state->offset_point || state->offset_line || state->offset_tri;
	rs->offset_units_unscaled = state->offset_units_unscaled;

	if (state->point_size_per_vertex) {
		psize_min = util_get_min_point_size(state);
		psize_max = 8192;
	} else {
		/* Force the point size to be as if the vertex output was disabled. */
		psize_min = state->point_size;
		psize_max = state->point_size;
	}

	spi_interp = S_0286D4_FLAT_SHADE_ENA(1);
	if (state->sprite_coord_enable) {
		spi_interp |= S_0286D4_PNT_SPRITE_ENA(1) |
			      S_0286D4_PNT_SPRITE_OVRD_X(2) |
			      S_0286D4_PNT_SPRITE_OVRD_Y(3) |
			      S_0286D4_PNT_SPRITE_OVRD_Z(0) |
			      S_0286D4_PNT_SPRITE_OVRD_W(1);
		if (state->sprite_coord_mode != PIPE_SPRITE_COORD_UPPER_LEFT) {
			spi_interp |= S_0286D4_PNT_SPRITE_TOP_1(1);
		}
	}

	r600_store_context_reg_seq(&rs->buffer, R_028A00_PA_SU_POINT_SIZE, 3);
	/* point size 12.4 fixed point (divide by two, because 0.5 = 1 pixel) */
	tmp = r600_pack_float_12p4(state->point_size/2);
	r600_store_value(&rs->buffer, /* R_028A00_PA_SU_POINT_SIZE */
			 S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
	r600_store_value(&rs->buffer, /* R_028A04_PA_SU_POINT_MINMAX */
			 S_028A04_MIN_SIZE(r600_pack_float_12p4(psize_min/2)) |
			 S_028A04_MAX_SIZE(r600_pack_float_12p4(psize_max/2)));
	r600_store_value(&rs->buffer, /* R_028A08_PA_SU_LINE_CNTL */
			 S_028A08_WIDTH((unsigned)(state->line_width * 8)));

	r600_store_context_reg(&rs->buffer, R_0286D4_SPI_INTERP_CONTROL_0, spi_interp);
	r600_store_context_reg(&rs->buffer, R_028A48_PA_SC_MODE_CNTL_0,
			       S_028A48_MSAA_ENABLE(state->multisample) |
			       S_028A48_VPORT_SCISSOR_ENABLE(1) |
			       S_028A48_LINE_STIPPLE_ENABLE(state->line_stipple_enable));

	if (rctx->b.chip_class == CAYMAN) {
		r600_store_context_reg(&rs->buffer, CM_R_028BE4_PA_SU_VTX_CNTL,
				       S_028C08_PIX_CENTER_HALF(state->half_pixel_center) |
				       S_028C08_QUANT_MODE(V_028C08_X_1_256TH));
	} else {
		r600_store_context_reg(&rs->buffer, R_028C08_PA_SU_VTX_CNTL,
				       S_028C08_PIX_CENTER_HALF(state->half_pixel_center) |
				       S_028C08_QUANT_MODE(V_028C08_X_1_256TH));
	}

	r600_store_context_reg(&rs->buffer, R_028B7C_PA_SU_POLY_OFFSET_CLAMP, fui(state->offset_clamp));
	r600_store_context_reg(&rs->buffer, R_028814_PA_SU_SC_MODE_CNTL,
			       S_028814_PROVOKING_VTX_LAST(!state->flatshade_first) |
			       S_028814_CULL_FRONT((state->cull_face & PIPE_FACE_FRONT) ? 1 : 0) |
			       S_028814_CULL_BACK((state->cull_face & PIPE_FACE_BACK) ? 1 : 0) |
			       S_028814_FACE(!state->front_ccw) |
			       S_028814_POLY_OFFSET_FRONT_ENABLE(util_get_offset(state, state->fill_front)) |
			       S_028814_POLY_OFFSET_BACK_ENABLE(util_get_offset(state, state->fill_back)) |
			       S_028814_POLY_OFFSET_PARA_ENABLE(state->offset_point || state->offset_line) |
			       S_028814_POLY_MODE(state->fill_front != PIPE_POLYGON_MODE_FILL ||
						  state->fill_back != PIPE_POLYGON_MODE_FILL) |
			       S_028814_POLYMODE_FRONT_PTYPE(r600_translate_fill(state->fill_front)) |
			       S_028814_POLYMODE_BACK_PTYPE(r600_translate_fill(state->fill_back)));
	return rs;
}
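/* Build a sampler CSO. LOD values are programmed in 4.8 fixed point: the
 * min/max LOD clamp to [0, 15] and the LOD bias to [-16, 16] before the
 * S_FIXED() conversion below. */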
static void *evergreen_create_sampler_state(struct pipe_context *ctx,
					    const struct pipe_sampler_state *state)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
	struct r600_pipe_sampler_state *ss = CALLOC_STRUCT(r600_pipe_sampler_state);
	unsigned max_aniso = rscreen->force_aniso >= 0 ? rscreen->force_aniso
						       : state->max_anisotropy;
	unsigned max_aniso_ratio = r600_tex_aniso_filter(max_aniso);

	if (!ss) {
		return NULL;
	}

	ss->border_color_use = sampler_state_needs_border_color(state);

	/* R_03C000_SQ_TEX_SAMPLER_WORD0_0 */
	ss->tex_sampler_words[0] =
		S_03C000_CLAMP_X(r600_tex_wrap(state->wrap_s)) |
		S_03C000_CLAMP_Y(r600_tex_wrap(state->wrap_t)) |
		S_03C000_CLAMP_Z(r600_tex_wrap(state->wrap_r)) |
		S_03C000_XY_MAG_FILTER(eg_tex_filter(state->mag_img_filter, max_aniso)) |
		S_03C000_XY_MIN_FILTER(eg_tex_filter(state->min_img_filter, max_aniso)) |
		S_03C000_MIP_FILTER(r600_tex_mipfilter(state->min_mip_filter)) |
		S_03C000_MAX_ANISO_RATIO(max_aniso_ratio) |
		S_03C000_DEPTH_COMPARE_FUNCTION(r600_tex_compare(state->compare_func)) |
		S_03C000_BORDER_COLOR_TYPE(ss->border_color_use ? V_03C000_SQ_TEX_BORDER_COLOR_REGISTER : 0);
	/* R_03C004_SQ_TEX_SAMPLER_WORD1_0 */
	ss->tex_sampler_words[1] =
		S_03C004_MIN_LOD(S_FIXED(CLAMP(state->min_lod, 0, 15), 8)) |
		S_03C004_MAX_LOD(S_FIXED(CLAMP(state->max_lod, 0, 15), 8));
	/* R_03C008_SQ_TEX_SAMPLER_WORD2_0 */
	ss->tex_sampler_words[2] =
		S_03C008_LOD_BIAS(S_FIXED(CLAMP(state->lod_bias, -16, 16), 8)) |
		(state->seamless_cube_map ? 0 : S_03C008_DISABLE_CUBE_WRAP(1)) |
		S_03C008_TYPE(1);

	if (ss->border_color_use) {
		memcpy(&ss->border_color, &state->border_color, sizeof(state->border_color));
	}
	return ss;
}

struct eg_buf_res_params {
	enum pipe_format pipe_format;
	unsigned offset;
	unsigned size;
	unsigned char swizzle[4];
	bool uncached;
	bool force_swizzle;
};

static void evergreen_fill_buffer_resource_words(struct r600_context *rctx,
						 struct pipe_resource *buffer,
						 struct eg_buf_res_params *params,
						 bool *skip_mip_address_reloc,
						 unsigned tex_resource_words[8])
{
	struct r600_texture *tmp = (struct r600_texture*)buffer;
	uint64_t va;
	int stride = util_format_get_blocksize(params->pipe_format);
	unsigned format, num_format, format_comp, endian;
	unsigned swizzle_res;
	const struct util_format_description *desc;

	r600_vertex_data_type(params->pipe_format,
			      &format, &num_format, &format_comp,
			      &endian);

	desc = util_format_description(params->pipe_format);

	if (params->force_swizzle)
		swizzle_res = r600_get_swizzle_combined(params->swizzle, NULL, TRUE);
	else
		swizzle_res = r600_get_swizzle_combined(desc->swizzle, params->swizzle, TRUE);

	va = tmp->resource.gpu_address + params->offset;
	*skip_mip_address_reloc = true;
	tex_resource_words[0] = va;
	tex_resource_words[1] = params->size - 1;
	tex_resource_words[2] = S_030008_BASE_ADDRESS_HI(va >> 32UL) |
				S_030008_STRIDE(stride) |
				S_030008_DATA_FORMAT(format) |
				S_030008_NUM_FORMAT_ALL(num_format) |
				S_030008_FORMAT_COMP_ALL(format_comp) |
				S_030008_ENDIAN_SWAP(endian);
	tex_resource_words[3] = swizzle_res | S_03000C_UNCACHED(params->uncached);
	/*
	 * dword 4 is the number of elements, for use with resinfo,
	 * though the AMD GPU shader analyser uses a const buffer
	 * to store the element sizes for buffer txq
	 */
	tex_resource_words[4] = params->size / stride;

	tex_resource_words[5] = tex_resource_words[6] = 0;
	tex_resource_words[7] = S_03001C_TYPE(V_03001C_SQ_TEX_VTX_VALID_BUFFER);
}
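/* Sampler views on PIPE_BUFFER resources reuse the vertex-fetch style
 * resource words above; the view is tracked on rctx->texture_buffers only
 * once the backing buffer has a GPU address. */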
static struct pipe_sampler_view *
texture_buffer_sampler_view(struct r600_context *rctx,
			    struct r600_pipe_sampler_view *view,
			    unsigned width0, unsigned height0)
{
	struct r600_texture *tmp = (struct r600_texture*)view->base.texture;
	struct eg_buf_res_params params;

	memset(&params, 0, sizeof(params));

	params.pipe_format = view->base.format;
	params.offset = view->base.u.buf.offset;
	params.size = view->base.u.buf.size;
	params.swizzle[0] = view->base.swizzle_r;
	params.swizzle[1] = view->base.swizzle_g;
	params.swizzle[2] = view->base.swizzle_b;
	params.swizzle[3] = view->base.swizzle_a;

	evergreen_fill_buffer_resource_words(rctx, view->base.texture,
					     &params, &view->skip_mip_address_reloc,
					     view->tex_resource_words);
	view->tex_resource = &tmp->resource;

	if (tmp->resource.gpu_address)
		LIST_ADDTAIL(&view->list, &rctx->texture_buffers);
	return &view->base;
}

struct eg_tex_res_params {
	enum pipe_format pipe_format;
	int force_level;
	unsigned width0;
	unsigned height0;
	unsigned first_level;
	unsigned last_level;
	unsigned first_layer;
	unsigned last_layer;
	unsigned target;
	unsigned char swizzle[4];
};

static int evergreen_fill_tex_resource_words(struct r600_context *rctx,
					     struct pipe_resource *texture,
					     struct eg_tex_res_params *params,
					     bool *skip_mip_address_reloc,
					     unsigned tex_resource_words[8])
{
	struct r600_screen *rscreen = (struct r600_screen*)rctx->b.b.screen;
	struct r600_texture *tmp = (struct r600_texture*)texture;
	unsigned format, endian;
	uint32_t word4 = 0, yuv_format = 0, pitch = 0;
	unsigned char array_mode = 0, non_disp_tiling = 0;
	unsigned height, depth, width;
	unsigned macro_aspect, tile_split, bankh, bankw, nbanks, fmask_bankh;
	struct legacy_surf_level *surflevel;
	unsigned base_level, first_level, last_level;
	unsigned dim, last_layer;
	uint64_t va;
	bool do_endian_swap = FALSE;

	tile_split = tmp->surface.u.legacy.tile_split;
	surflevel = tmp->surface.u.legacy.level;
	/* Texturing with separate depth and stencil. */
	if (tmp->db_compatible) {
		switch (params->pipe_format) {
		case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
			params->pipe_format = PIPE_FORMAT_Z32_FLOAT;
			break;
		case PIPE_FORMAT_X8Z24_UNORM:
		case PIPE_FORMAT_S8_UINT_Z24_UNORM:
			/* Z24 is always stored like this for DB
			 * compatibility.
			 */
			params->pipe_format = PIPE_FORMAT_Z24X8_UNORM;
			break;
		case PIPE_FORMAT_X24S8_UINT:
		case PIPE_FORMAT_S8X24_UINT:
		case PIPE_FORMAT_X32_S8X24_UINT:
			params->pipe_format = PIPE_FORMAT_S8_UINT;
			tile_split = tmp->surface.u.legacy.stencil_tile_split;
			surflevel = tmp->surface.u.legacy.stencil_level;
			break;
		default:;
		}
	}

	if (R600_BIG_ENDIAN)
		do_endian_swap = !tmp->db_compatible;

	format = r600_translate_texformat(rctx->b.b.screen, params->pipe_format,
					  params->swizzle,
					  &word4, &yuv_format, do_endian_swap);
	assert(format != ~0);
	if (format == ~0) {
		return -1;
	}

	endian = r600_colorformat_endian_swap(format, do_endian_swap);

	base_level = 0;
	first_level = params->first_level;
	last_level = params->last_level;
	width = params->width0;
	height = params->height0;
	depth = texture->depth0;

	if (params->force_level) {
		base_level = params->force_level;
		first_level = 0;
		last_level = 0;
		width = u_minify(width, params->force_level);
		height = u_minify(height, params->force_level);
		depth = u_minify(depth, params->force_level);
	}

	pitch = surflevel[base_level].nblk_x * util_format_get_blockwidth(params->pipe_format);
	non_disp_tiling = tmp->non_disp_tiling;

	switch (surflevel[base_level].mode) {
	default:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
		array_mode = V_028C70_ARRAY_LINEAR_ALIGNED;
		break;
	case RADEON_SURF_MODE_2D:
		array_mode = V_028C70_ARRAY_2D_TILED_THIN1;
		break;
	case RADEON_SURF_MODE_1D:
		array_mode = V_028C70_ARRAY_1D_TILED_THIN1;
		break;
	}
	macro_aspect = tmp->surface.u.legacy.mtilea;
	bankw = tmp->surface.u.legacy.bankw;
	bankh = tmp->surface.u.legacy.bankh;
	tile_split = eg_tile_split(tile_split);
	macro_aspect = eg_macro_tile_aspect(macro_aspect);
	bankw = eg_bank_wh(bankw);
	bankh = eg_bank_wh(bankh);
	fmask_bankh = eg_bank_wh(tmp->fmask.bank_height);

	/* 128 bit formats require tile type = 1 */
	if (rscreen->b.chip_class == CAYMAN) {
		if (util_format_get_blocksize(params->pipe_format) >= 16)
			non_disp_tiling = 1;
	}
	nbanks = eg_num_banks(rscreen->b.info.r600_num_banks);

	va = tmp->resource.gpu_address;

	/* array type views and views into array types need to use layer offset */
	dim = r600_tex_dim(tmp, params->target, texture->nr_samples);

	if (dim == V_030000_SQ_TEX_DIM_1D_ARRAY) {
		height = 1;
		depth = texture->array_size;
	} else if (dim == V_030000_SQ_TEX_DIM_2D_ARRAY ||
		   dim == V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA) {
		depth = texture->array_size;
	} else if (dim == V_030000_SQ_TEX_DIM_CUBEMAP)
		depth = texture->array_size / 6;

	tex_resource_words[0] = (S_030000_DIM(dim) |
				 S_030000_PITCH((pitch / 8) - 1) |
				 S_030000_TEX_WIDTH(width - 1));
	if (rscreen->b.chip_class == CAYMAN)
		tex_resource_words[0] |= CM_S_030000_NON_DISP_TILING_ORDER(non_disp_tiling);
	else
		tex_resource_words[0] |= S_030000_NON_DISP_TILING_ORDER(non_disp_tiling);
	tex_resource_words[1] = (S_030004_TEX_HEIGHT(height - 1) |
				 S_030004_TEX_DEPTH(depth - 1) |
				 S_030004_ARRAY_MODE(array_mode));
	tex_resource_words[2] = (surflevel[base_level].offset + va) >> 8;
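	/* TEX_RESOURCE_WORD3 does double duty: for compressed-MSAA textures
	 * it holds the FMASK address (0 disables FMASK, and for depth the
	 * reloc is skipped too), otherwise it points at the next mip level. */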
	*skip_mip_address_reloc = false;
	/* TEX_RESOURCE_WORD3.MIP_ADDRESS */
	if (texture->nr_samples > 1 && rscreen->has_compressed_msaa_texturing) {
		if (tmp->is_depth) {
			/* disable FMASK (0 = disabled) */
			tex_resource_words[3] = 0;
			*skip_mip_address_reloc = true;
		} else {
			/* FMASK should be in MIP_ADDRESS for multisample textures */
			tex_resource_words[3] = (tmp->fmask.offset + va) >> 8;
		}
	} else if (last_level && texture->nr_samples <= 1) {
		tex_resource_words[3] = (surflevel[1].offset + va) >> 8;
	} else {
		tex_resource_words[3] = (surflevel[base_level].offset + va) >> 8;
	}

	last_layer = params->last_layer;
	if (params->target != texture->target && depth == 1) {
		last_layer = params->first_layer;
	}
	tex_resource_words[4] = (word4 |
				 S_030010_ENDIAN_SWAP(endian));
	tex_resource_words[5] = S_030014_BASE_ARRAY(params->first_layer) |
				S_030014_LAST_ARRAY(last_layer);
	tex_resource_words[6] = S_030018_TILE_SPLIT(tile_split);

	if (texture->nr_samples > 1) {
		unsigned log_samples = util_logbase2(texture->nr_samples);
		if (rscreen->b.chip_class == CAYMAN) {
			tex_resource_words[4] |= S_030010_LOG2_NUM_FRAGMENTS(log_samples);
		}
		/* LAST_LEVEL holds log2(nr_samples) for multisample textures */
		tex_resource_words[5] |= S_030014_LAST_LEVEL(log_samples);
		tex_resource_words[6] |= S_030018_FMASK_BANK_HEIGHT(fmask_bankh);
	} else {
		bool no_mip = first_level == last_level;

		tex_resource_words[4] |= S_030010_BASE_LEVEL(first_level);
		tex_resource_words[5] |= S_030014_LAST_LEVEL(last_level);
		/* aniso max 16 samples */
		tex_resource_words[6] |= S_030018_MAX_ANISO_RATIO(no_mip ? 0 : 4);
	}

	tex_resource_words[7] = S_03001C_DATA_FORMAT(format) |
				S_03001C_TYPE(V_03001C_SQ_TEX_VTX_VALID_TEXTURE) |
				S_03001C_BANK_WIDTH(bankw) |
				S_03001C_BANK_HEIGHT(bankh) |
				S_03001C_MACRO_TILE_ASPECT(macro_aspect) |
				S_03001C_NUM_BANKS(nbanks) |
				S_03001C_DEPTH_SAMPLE_ORDER(tmp->db_compatible);
	return 0;
}

struct pipe_sampler_view *
evergreen_create_sampler_view_custom(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     const struct pipe_sampler_view *state,
				     unsigned width0, unsigned height0,
				     unsigned force_level)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct r600_pipe_sampler_view *view = CALLOC_STRUCT(r600_pipe_sampler_view);
	struct r600_texture *tmp = (struct r600_texture*)texture;
	struct eg_tex_res_params params;
	int ret;

	if (!view)
		return NULL;

	/* initialize base object */
	view->base = *state;
	view->base.texture = NULL;
	pipe_reference(NULL, &texture->reference);
	view->base.texture = texture;
	view->base.reference.count = 1;
	view->base.context = ctx;

	if (state->target == PIPE_BUFFER)
		return texture_buffer_sampler_view(rctx, view, width0, height0);

	memset(&params, 0, sizeof(params));
	params.pipe_format = state->format;
	params.force_level = force_level;
	params.width0 = width0;
	params.height0 = height0;
	params.first_level = state->u.tex.first_level;
	params.last_level = state->u.tex.last_level;
	params.first_layer = state->u.tex.first_layer;
	params.last_layer = state->u.tex.last_layer;
	params.target = state->target;
	params.swizzle[0] = state->swizzle_r;
	params.swizzle[1] = state->swizzle_g;
	params.swizzle[2] = state->swizzle_b;
	params.swizzle[3] = state->swizzle_a;

	ret = evergreen_fill_tex_resource_words(rctx, texture, &params,
						&view->skip_mip_address_reloc,
						view->tex_resource_words);
	if (ret != 0) {
		FREE(view);
		return NULL;
	}
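	/* Stencil-only view formats are flagged so later texture handling
	 * (e.g. depth/stencil decompression) can treat reads through this
	 * view as stencil reads. */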
	if (state->format == PIPE_FORMAT_X24S8_UINT ||
	    state->format == PIPE_FORMAT_S8X24_UINT ||
	    state->format == PIPE_FORMAT_X32_S8X24_UINT ||
	    state->format == PIPE_FORMAT_S8_UINT)
		view->is_stencil_sampler = true;

	view->tex_resource = &tmp->resource;

	return &view->base;
}

static struct pipe_sampler_view *
evergreen_create_sampler_view(struct pipe_context *ctx,
			      struct pipe_resource *tex,
			      const struct pipe_sampler_view *state)
{
	return evergreen_create_sampler_view_custom(ctx, tex, state,
						    tex->width0, tex->height0, 0);
}

static void evergreen_emit_config_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_config_state *a = (struct r600_config_state*)atom;

	radeon_set_config_reg_seq(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, 3);
	if (a->dyn_gpr_enabled) {
		radeon_emit(cs, S_008C04_NUM_CLAUSE_TEMP_GPRS(rctx->r6xx_num_clause_temp_gprs));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
	} else {
		radeon_emit(cs, a->sq_gpr_resource_mgmt_1);
		radeon_emit(cs, a->sq_gpr_resource_mgmt_2);
		radeon_emit(cs, a->sq_gpr_resource_mgmt_3);
	}
	radeon_set_config_reg(cs, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, (a->dyn_gpr_enabled << 8));
	if (a->dyn_gpr_enabled) {
		radeon_set_context_reg(cs, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
				       S_028838_PS_GPRS(0x1e) |
				       S_028838_VS_GPRS(0x1e) |
				       S_028838_GS_GPRS(0x1e) |
				       S_028838_ES_GPRS(0x1e) |
				       S_028838_HS_GPRS(0x1e) |
				       S_028838_LS_GPRS(0x1e)); /* workaround for hw issues with dyn gpr - must set all limits to 240 instead of 0, 0x1e == 240 / 8 */
	}
}

static void evergreen_emit_clip_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct pipe_clip_state *state = &rctx->clip_state.state;

	radeon_set_context_reg_seq(cs, R_0285BC_PA_CL_UCP0_X, 6*4);
	radeon_emit_array(cs, (unsigned*)state, 6*4);
}

static void evergreen_set_polygon_stipple(struct pipe_context *ctx,
					  const struct pipe_poly_stipple *state)
{
}

static void evergreen_get_scissor_rect(struct r600_context *rctx,
				       unsigned tl_x, unsigned tl_y, unsigned br_x, unsigned br_y,
				       uint32_t *tl, uint32_t *br)
{
	struct pipe_scissor_state scissor = {tl_x, tl_y, br_x, br_y};

	evergreen_apply_scissor_bug_workaround(&rctx->b, &scissor);

	*tl = S_028240_TL_X(scissor.minx) | S_028240_TL_Y(scissor.miny);
	*br = S_028244_BR_X(scissor.maxx) | S_028244_BR_Y(scissor.maxy);
}

struct r600_tex_color_info {
	unsigned info;
	unsigned view;
	unsigned dim;
	unsigned pitch;
	unsigned slice;
	unsigned attrib;
	unsigned ntype;
	unsigned fmask;
	unsigned fmask_slice;
	uint64_t offset;
	boolean export_16bpc;
};
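/* Fill CB state for a buffer resource programmed as a linear-aligned 1D
 * color surface; this is the path used for RAT (image/buffer) setup. */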
static void evergreen_set_color_surface_buffer(struct r600_context *rctx,
					       struct r600_resource *res,
					       enum pipe_format pformat,
					       unsigned first_element,
					       unsigned last_element,
					       struct r600_tex_color_info *color)
{
	unsigned format, swap, ntype, endian;
	const struct util_format_description *desc;
	unsigned block_size = util_format_get_blocksize(res->b.b.format);
	unsigned pitch_alignment =
		MAX2(64, rctx->screen->b.info.pipe_interleave_bytes / block_size);
	unsigned pitch = align(res->b.b.width0, pitch_alignment);
	int i;
	unsigned width_elements;

	width_elements = last_element - first_element + 1;

	format = r600_translate_colorformat(rctx->b.chip_class, pformat, FALSE);
	swap = r600_translate_colorswap(pformat, FALSE);

	endian = r600_colorformat_endian_swap(format, FALSE);

	desc = util_format_description(pformat);
	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}
	ntype = V_028C70_NUMBER_UNORM;
	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
		ntype = V_028C70_NUMBER_SRGB;
	else if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_028C70_NUMBER_SNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_028C70_NUMBER_SINT;
	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_028C70_NUMBER_UNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_028C70_NUMBER_UINT;
	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT) {
		ntype = V_028C70_NUMBER_FLOAT;
	}

	pitch = (pitch / 8) - 1;
	color->pitch = S_028C64_PITCH_TILE_MAX(pitch);

	color->info = S_028C70_ARRAY_MODE(V_028C70_ARRAY_LINEAR_ALIGNED);
	color->info |= S_028C70_FORMAT(format) |
		       S_028C70_COMP_SWAP(swap) |
		       S_028C70_BLEND_CLAMP(0) |
		       S_028C70_BLEND_BYPASS(1) |
		       S_028C70_NUMBER_TYPE(ntype) |
		       S_028C70_ENDIAN(endian);
	color->attrib = S_028C74_NON_DISP_TILING_ORDER(1);
	color->ntype = ntype;
	color->export_16bpc = false;
	color->dim = width_elements - 1;
	color->slice = 0; /* (width_elements / 64) - 1;*/
	color->view = 0;
	color->offset = (res->gpu_address + first_element) >> 8;

	color->fmask = color->offset;
	color->fmask_slice = 0;
}

static void evergreen_set_color_surface_common(struct r600_context *rctx,
					       struct r600_texture *rtex,
					       unsigned level,
					       unsigned first_layer,
					       unsigned last_layer,
					       enum pipe_format pformat,
					       struct r600_tex_color_info *color)
{
	struct r600_screen *rscreen = rctx->screen;
	unsigned pitch, slice;
	unsigned non_disp_tiling, macro_aspect, tile_split, bankh, bankw, fmask_bankh, nbanks;
	unsigned format, swap, ntype, endian;
	const struct util_format_description *desc;
	bool blend_clamp = 0, blend_bypass = 0, do_endian_swap = FALSE;
	int i;

	color->offset = rtex->surface.u.legacy.level[level].offset;
	color->view = S_028C6C_SLICE_START(first_layer) |
		      S_028C6C_SLICE_MAX(last_layer);

	color->offset += rtex->resource.gpu_address;
	color->offset >>= 8;

	color->dim = 0;
	pitch = (rtex->surface.u.legacy.level[level].nblk_x) / 8 - 1;
	slice = (rtex->surface.u.legacy.level[level].nblk_x * rtex->surface.u.legacy.level[level].nblk_y) / 64;
	if (slice) {
		slice = slice - 1;
	}

	color->info = 0;
	switch (rtex->surface.u.legacy.level[level].mode) {
	default:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
		color->info = S_028C70_ARRAY_MODE(V_028C70_ARRAY_LINEAR_ALIGNED);
		non_disp_tiling = 1;
		break;
	case RADEON_SURF_MODE_1D:
		color->info = S_028C70_ARRAY_MODE(V_028C70_ARRAY_1D_TILED_THIN1);
		non_disp_tiling = rtex->non_disp_tiling;
		break;
	case RADEON_SURF_MODE_2D:
		color->info = S_028C70_ARRAY_MODE(V_028C70_ARRAY_2D_TILED_THIN1);
		non_disp_tiling = rtex->non_disp_tiling;
		break;
	}
	tile_split = rtex->surface.u.legacy.tile_split;
	macro_aspect = rtex->surface.u.legacy.mtilea;
	bankw = rtex->surface.u.legacy.bankw;
	bankh = rtex->surface.u.legacy.bankh;
	if (rtex->fmask.size)
		fmask_bankh = rtex->fmask.bank_height;
	else
		fmask_bankh = rtex->surface.u.legacy.bankh;
	tile_split = eg_tile_split(tile_split);
	macro_aspect = eg_macro_tile_aspect(macro_aspect);
	bankw = eg_bank_wh(bankw);
	bankh = eg_bank_wh(bankh);
	fmask_bankh = eg_bank_wh(fmask_bankh);

	if (rscreen->b.chip_class == CAYMAN) {
		if (util_format_get_blocksize(pformat) >= 16)
			non_disp_tiling = 1;
	}
	nbanks = eg_num_banks(rscreen->b.info.r600_num_banks);
	desc = util_format_description(pformat);
	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}
	color->attrib = S_028C74_TILE_SPLIT(tile_split) |
			S_028C74_NUM_BANKS(nbanks) |
			S_028C74_BANK_WIDTH(bankw) |
			S_028C74_BANK_HEIGHT(bankh) |
			S_028C74_MACRO_TILE_ASPECT(macro_aspect) |
			S_028C74_NON_DISP_TILING_ORDER(non_disp_tiling) |
			S_028C74_FMASK_BANK_HEIGHT(fmask_bankh);

	if (rctx->b.chip_class == CAYMAN) {
		color->attrib |= S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == PIPE_SWIZZLE_1);

		if (rtex->resource.b.b.nr_samples > 1) {
			unsigned log_samples = util_logbase2(rtex->resource.b.b.nr_samples);
			color->attrib |= S_028C74_NUM_SAMPLES(log_samples) |
					 S_028C74_NUM_FRAGMENTS(log_samples);
		}
	}

	ntype = V_028C70_NUMBER_UNORM;
	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
		ntype = V_028C70_NUMBER_SRGB;
	else if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_028C70_NUMBER_SNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_028C70_NUMBER_SINT;
	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_028C70_NUMBER_UNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_028C70_NUMBER_UINT;
	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT) {
		ntype = V_028C70_NUMBER_FLOAT;
	}

	if (R600_BIG_ENDIAN)
		do_endian_swap = !rtex->db_compatible;

	format = r600_translate_colorformat(rctx->b.chip_class, pformat, do_endian_swap);
	assert(format != ~0);
	swap = r600_translate_colorswap(pformat, do_endian_swap);
	assert(swap != ~0);

	endian = r600_colorformat_endian_swap(format, do_endian_swap);

	/* blend clamp should be set for all NORM/SRGB types */
	if (ntype == V_028C70_NUMBER_UNORM || ntype == V_028C70_NUMBER_SNORM ||
	    ntype == V_028C70_NUMBER_SRGB)
		blend_clamp = 1;

	/* set blend bypass according to docs if SINT/UINT or
	   8/24 COLOR variants */
	if (ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT ||
	    format == V_028C70_COLOR_8_24 || format == V_028C70_COLOR_24_8 ||
	    format == V_028C70_COLOR_X24_8_32_FLOAT) {
		blend_clamp = 0;
		blend_bypass = 1;
	}

	color->ntype = ntype;
	color->info |= S_028C70_FORMAT(format) |
		       S_028C70_COMP_SWAP(swap) |
		       S_028C70_BLEND_CLAMP(blend_clamp) |
		       S_028C70_BLEND_BYPASS(blend_bypass) |
		       S_028C70_SIMPLE_FLOAT(1) |
		       S_028C70_NUMBER_TYPE(ntype) |
		       S_028C70_ENDIAN(endian);

	if (rtex->fmask.size) {
		color->info |= S_028C70_COMPRESSION(1);
	}
	/* EXPORT_NORM is an optimization that can be enabled for better
	 * performance in certain cases.
	 * EXPORT_NORM can be enabled if:
	 * - 11-bit or smaller UNORM/SNORM/SRGB
	 * - 16-bit or smaller FLOAT
	 */
	color->export_16bpc = false;
	if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS &&
	    ((desc->channel[i].size < 12 &&
	      desc->channel[i].type != UTIL_FORMAT_TYPE_FLOAT &&
	      ntype != V_028C70_NUMBER_UINT && ntype != V_028C70_NUMBER_SINT) ||
	     (desc->channel[i].size < 17 &&
	      desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT))) {
		color->info |= S_028C70_SOURCE_FORMAT(V_028C70_EXPORT_4C_16BPC);
		color->export_16bpc = true;
	}

	color->pitch = S_028C64_PITCH_TILE_MAX(pitch);
	color->slice = S_028C68_SLICE_TILE_MAX(slice);

	if (rtex->fmask.size) {
		color->fmask = (rtex->resource.gpu_address + rtex->fmask.offset) >> 8;
		color->fmask_slice = S_028C88_TILE_MAX(rtex->fmask.slice_tile_max);
	} else {
		color->fmask = color->offset;
		color->fmask_slice = S_028C88_TILE_MAX(slice);
	}
}

/**
 * This function initializes the CB* register values for RATs. It is meant
 * to be used for 1D aligned buffers that do not have an associated
 * radeon_surf.
 */
void evergreen_init_color_surface_rat(struct r600_context *rctx,
				      struct r600_surface *surf)
{
	struct pipe_resource *pipe_buffer = surf->base.texture;
	struct r600_tex_color_info color;

	evergreen_set_color_surface_buffer(rctx, (struct r600_resource *)surf->base.texture,
					   surf->base.format, 0, pipe_buffer->width0,
					   &color);

	surf->cb_color_base = color.offset;
	surf->cb_color_dim = color.dim;
	surf->cb_color_info = color.info | S_028C70_RAT(1);
	surf->cb_color_pitch = color.pitch;
	surf->cb_color_slice = color.slice;
	surf->cb_color_view = color.view;
	surf->cb_color_attrib = color.attrib;
	surf->cb_color_fmask = color.fmask;
	surf->cb_color_fmask_slice = color.fmask_slice;

	surf->cb_color_view = 0;

	/* Set the buffer range the GPU will have access to: */
	util_range_add(&r600_resource(pipe_buffer)->valid_buffer_range,
		       0, pipe_buffer->width0);
}

void evergreen_init_color_surface(struct r600_context *rctx,
				  struct r600_surface *surf)
{
	struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
	unsigned level = surf->base.u.tex.level;
	struct r600_tex_color_info color;

	evergreen_set_color_surface_common(rctx, rtex, level,
					   surf->base.u.tex.first_layer,
					   surf->base.u.tex.last_layer,
					   surf->base.format,
					   &color);

	surf->alphatest_bypass = color.ntype == V_028C70_NUMBER_UINT ||
				 color.ntype == V_028C70_NUMBER_SINT;
	surf->export_16bpc = color.export_16bpc;

	/* XXX handle enabling of CB beyond BASE8 which has different offset */
	surf->cb_color_base = color.offset;
	surf->cb_color_dim = color.dim;
	surf->cb_color_info = color.info;
	surf->cb_color_pitch = color.pitch;
	surf->cb_color_slice = color.slice;
	surf->cb_color_view = color.view;
	surf->cb_color_attrib = color.attrib;
	surf->cb_color_fmask = color.fmask;
	surf->cb_color_fmask_slice = color.fmask_slice;

	surf->color_initialized = true;
}
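/* Fill the DB_* register values for a depth/stencil surface. Note that
 * linear-aligned levels are programmed as 1D tiled below; the DB appears
 * not to support linear surfaces. */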
static void evergreen_init_depth_surface(struct r600_context *rctx,
					 struct r600_surface *surf)
{
	struct r600_screen *rscreen = rctx->screen;
	struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
	unsigned level = surf->base.u.tex.level;
	struct legacy_surf_level *levelinfo = &rtex->surface.u.legacy.level[level];
	uint64_t offset;
	unsigned format, array_mode;
	unsigned macro_aspect, tile_split, bankh, bankw, nbanks;

	format = r600_translate_dbformat(surf->base.format);
	assert(format != ~0);

	offset = rtex->resource.gpu_address;
	offset += rtex->surface.u.legacy.level[level].offset;

	switch (rtex->surface.u.legacy.level[level].mode) {
	case RADEON_SURF_MODE_2D:
		array_mode = V_028C70_ARRAY_2D_TILED_THIN1;
		break;
	case RADEON_SURF_MODE_1D:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
	default:
		array_mode = V_028C70_ARRAY_1D_TILED_THIN1;
		break;
	}
	tile_split = rtex->surface.u.legacy.tile_split;
	macro_aspect = rtex->surface.u.legacy.mtilea;
	bankw = rtex->surface.u.legacy.bankw;
	bankh = rtex->surface.u.legacy.bankh;
	tile_split = eg_tile_split(tile_split);
	macro_aspect = eg_macro_tile_aspect(macro_aspect);
	bankw = eg_bank_wh(bankw);
	bankh = eg_bank_wh(bankh);
	nbanks = eg_num_banks(rscreen->b.info.r600_num_banks);
	offset >>= 8;

	surf->db_z_info = S_028040_ARRAY_MODE(array_mode) |
			  S_028040_FORMAT(format) |
			  S_028040_TILE_SPLIT(tile_split) |
			  S_028040_NUM_BANKS(nbanks) |
			  S_028040_BANK_WIDTH(bankw) |
			  S_028040_BANK_HEIGHT(bankh) |
			  S_028040_MACRO_TILE_ASPECT(macro_aspect);
	if (rscreen->b.chip_class == CAYMAN && rtex->resource.b.b.nr_samples > 1) {
		surf->db_z_info |= S_028040_NUM_SAMPLES(util_logbase2(rtex->resource.b.b.nr_samples));
	}

	assert(levelinfo->nblk_x % 8 == 0 && levelinfo->nblk_y % 8 == 0);

	surf->db_depth_base = offset;
	surf->db_depth_view = S_028008_SLICE_START(surf->base.u.tex.first_layer) |
			      S_028008_SLICE_MAX(surf->base.u.tex.last_layer);
	surf->db_depth_size = S_028058_PITCH_TILE_MAX(levelinfo->nblk_x / 8 - 1) |
			      S_028058_HEIGHT_TILE_MAX(levelinfo->nblk_y / 8 - 1);
	surf->db_depth_slice = S_02805C_SLICE_TILE_MAX(levelinfo->nblk_x *
						       levelinfo->nblk_y / 64 - 1);
	if (rtex->surface.has_stencil) {
		uint64_t stencil_offset;
		unsigned stile_split = rtex->surface.u.legacy.stencil_tile_split;

		stile_split = eg_tile_split(stile_split);

		stencil_offset = rtex->surface.u.legacy.stencil_level[level].offset;
		stencil_offset += rtex->resource.gpu_address;

		surf->db_stencil_base = stencil_offset >> 8;
		surf->db_stencil_info = S_028044_FORMAT(V_028044_STENCIL_8) |
					S_028044_TILE_SPLIT(stile_split);
	} else {
		surf->db_stencil_base = offset;
		/* DRM 2.6.18 allows the INVALID format to disable stencil.
		 * Older kernels are out of luck. */
		surf->db_stencil_info = rctx->screen->b.info.drm_minor >= 18 ?
					S_028044_FORMAT(V_028044_STENCIL_INVALID) :
					S_028044_FORMAT(V_028044_STENCIL_8);
	}

	if (r600_htile_enabled(rtex, level)) {
		uint64_t va = rtex->resource.gpu_address + rtex->htile_offset;
		surf->db_htile_data_base = va >> 8;
		surf->db_htile_surface = S_028ABC_HTILE_WIDTH(1) |
					 S_028ABC_HTILE_HEIGHT(1) |
					 S_028ABC_FULL_CACHE(1);
		surf->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1);
		surf->db_preload_control = 0;
	}

	surf->depth_initialized = true;
}

static void evergreen_set_framebuffer_state(struct pipe_context *ctx,
					    const struct pipe_framebuffer_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_surface *surf;
	struct r600_texture *rtex;
	uint32_t i, log_samples;

	/* Flush TC when changing the framebuffer state, because the only
	 * client not using TC that can change textures is the framebuffer.
	 * Other places don't typically have to flush TC.
	 */
	rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE |
			 R600_CONTEXT_FLUSH_AND_INV |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV_CB_META |
			 R600_CONTEXT_FLUSH_AND_INV_DB |
			 R600_CONTEXT_FLUSH_AND_INV_DB_META |
			 R600_CONTEXT_INV_TEX_CACHE;

	util_copy_framebuffer_state(&rctx->framebuffer.state, state);

	/* Colorbuffers. */
	rctx->framebuffer.export_16bpc = state->nr_cbufs != 0;
	rctx->framebuffer.cb0_is_integer = state->nr_cbufs && state->cbufs[0] &&
					   util_format_is_pure_integer(state->cbufs[0]->format);
	rctx->framebuffer.compressed_cb_mask = 0;
	rctx->framebuffer.nr_samples = util_framebuffer_get_num_samples(state);

	for (i = 0; i < state->nr_cbufs; i++) {
		surf = (struct r600_surface*)state->cbufs[i];
		if (!surf)
			continue;

		rtex = (struct r600_texture*)surf->base.texture;

		r600_context_add_resource_size(ctx, state->cbufs[i]->texture);

		if (!surf->color_initialized) {
			evergreen_init_color_surface(rctx, surf);
		}

		if (!surf->export_16bpc) {
			rctx->framebuffer.export_16bpc = false;
		}

		if (rtex->fmask.size) {
			rctx->framebuffer.compressed_cb_mask |= 1 << i;
		}
	}

	/* Update alpha-test state dependencies.
	 * Alpha-test is done on the first colorbuffer only. */
	if (state->nr_cbufs) {
		bool alphatest_bypass = false;
		bool export_16bpc = true;

		surf = (struct r600_surface*)state->cbufs[0];
		if (surf) {
			alphatest_bypass = surf->alphatest_bypass;
			export_16bpc = surf->export_16bpc;
		}

		if (rctx->alphatest_state.bypass != alphatest_bypass) {
			rctx->alphatest_state.bypass = alphatest_bypass;
			r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
		}
		if (rctx->alphatest_state.cb0_export_16bpc != export_16bpc) {
			rctx->alphatest_state.cb0_export_16bpc = export_16bpc;
			r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
		}
	}
	/* ZS buffer. */
	if (state->zsbuf) {
		surf = (struct r600_surface*)state->zsbuf;

		r600_context_add_resource_size(ctx, state->zsbuf->texture);

		if (!surf->depth_initialized) {
			evergreen_init_depth_surface(rctx, surf);
		}

		if (state->zsbuf->format != rctx->poly_offset_state.zs_format) {
			rctx->poly_offset_state.zs_format = state->zsbuf->format;
			r600_mark_atom_dirty(rctx, &rctx->poly_offset_state.atom);
		}

		if (rctx->db_state.rsurf != surf) {
			rctx->db_state.rsurf = surf;
			r600_mark_atom_dirty(rctx, &rctx->db_state.atom);
			r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
		}
	} else if (rctx->db_state.rsurf) {
		rctx->db_state.rsurf = NULL;
		r600_mark_atom_dirty(rctx, &rctx->db_state.atom);
		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}

	if (rctx->cb_misc_state.nr_cbufs != state->nr_cbufs) {
		rctx->cb_misc_state.nr_cbufs = state->nr_cbufs;
		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}

	if (state->nr_cbufs == 0 && rctx->alphatest_state.bypass) {
		rctx->alphatest_state.bypass = false;
		r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
	}

	log_samples = util_logbase2(rctx->framebuffer.nr_samples);
	/* This is for Cayman to program SAMPLE_RATE, and for RV770 to fix a hw bug. */
	if ((rctx->b.chip_class == CAYMAN ||
	     rctx->b.family == CHIP_RV770) &&
	    rctx->db_misc_state.log_samples != log_samples) {
		rctx->db_misc_state.log_samples = log_samples;
		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}

	/* Calculate the CS size. */
	rctx->framebuffer.atom.num_dw = 4; /* SCISSOR */

	/* MSAA. */
	if (rctx->b.chip_class == EVERGREEN)
		rctx->framebuffer.atom.num_dw += 17; /* Evergreen */
	else
		rctx->framebuffer.atom.num_dw += 28; /* Cayman */

	/* Colorbuffers. */
	rctx->framebuffer.atom.num_dw += state->nr_cbufs * 23;
	rctx->framebuffer.atom.num_dw += state->nr_cbufs * 2;
	rctx->framebuffer.atom.num_dw += (12 - state->nr_cbufs) * 3;
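	/* The dword counts above and below size the framebuffer atom; they
	 * must cover everything evergreen_emit_framebuffer_state() writes,
	 * relocation NOPs included. */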
	/* ZS buffer. */
	if (state->zsbuf) {
		rctx->framebuffer.atom.num_dw += 24;
		rctx->framebuffer.atom.num_dw += 2;
	} else if (rctx->screen->b.info.drm_minor >= 18) {
		rctx->framebuffer.atom.num_dw += 4;
	}

	r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);

	r600_set_sample_locations_constant_buffer(rctx);
	rctx->framebuffer.do_update_surf_dirtiness = true;
}

static void evergreen_set_min_samples(struct pipe_context *ctx, unsigned min_samples)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (rctx->ps_iter_samples == min_samples)
		return;

	rctx->ps_iter_samples = min_samples;
	if (rctx->framebuffer.nr_samples > 1) {
		r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
	}
}

/* 8xMSAA */
static uint32_t sample_locs_8x[] = {
	FILL_SREG(-1,  1,  1,  5,  3, -5,  5,  3),
	FILL_SREG(-7, -1, -3, -7,  7, -3, -5,  7),
	FILL_SREG(-1,  1,  1,  5,  3, -5,  5,  3),
	FILL_SREG(-7, -1, -3, -7,  7, -3, -5,  7),
	FILL_SREG(-1,  1,  1,  5,  3, -5,  5,  3),
	FILL_SREG(-7, -1, -3, -7,  7, -3, -5,  7),
	FILL_SREG(-1,  1,  1,  5,  3, -5,  5,  3),
	FILL_SREG(-7, -1, -3, -7,  7, -3, -5,  7),
};
static unsigned max_dist_8x = 7;

static void evergreen_get_sample_position(struct pipe_context *ctx,
					  unsigned sample_count,
					  unsigned sample_index,
					  float *out_value)
{
	int offset, index;
	struct {
		int idx:4;
	} val;

	switch (sample_count) {
	case 1:
	default:
		out_value[0] = out_value[1] = 0.5;
		break;
	case 2:
		offset = 4 * (sample_index * 2);
		val.idx = (eg_sample_locs_2x[0] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (eg_sample_locs_2x[0] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 4:
		offset = 4 * (sample_index * 2);
		val.idx = (eg_sample_locs_4x[0] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (eg_sample_locs_4x[0] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 8:
		offset = 4 * (sample_index % 4 * 2);
		index = (sample_index / 4);
		val.idx = (sample_locs_8x[index] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (sample_locs_8x[index] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	}
}

static void evergreen_emit_msaa_state(struct r600_context *rctx, int nr_samples, int ps_iter_samples)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	unsigned max_dist = 0;

	switch (nr_samples) {
	default:
		nr_samples = 0;
		break;
	case 2:
		radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, ARRAY_SIZE(eg_sample_locs_2x));
		radeon_emit_array(cs, eg_sample_locs_2x, ARRAY_SIZE(eg_sample_locs_2x));
		max_dist = eg_max_dist_2x;
		break;
	case 4:
		radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, ARRAY_SIZE(eg_sample_locs_4x));
		radeon_emit_array(cs, eg_sample_locs_4x, ARRAY_SIZE(eg_sample_locs_4x));
		max_dist = eg_max_dist_4x;
		break;
	case 8:
		radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, ARRAY_SIZE(sample_locs_8x));
		radeon_emit_array(cs, sample_locs_8x, ARRAY_SIZE(sample_locs_8x));
		max_dist = max_dist_8x;
		break;
	}
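	/* Each PA_SC_AA_SAMPLE_LOCS dword above packs four x/y pairs of
	 * signed 4-bit sample offsets in 1/16th-pixel units (see
	 * evergreen_get_sample_position for the decode). */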
static void evergreen_get_sample_position(struct pipe_context *ctx,
					  unsigned sample_count,
					  unsigned sample_index,
					  float *out_value)
{
	int offset, index;
	struct {
		int idx:4;
	} val;

	switch (sample_count) {
	case 1:
	default:
		out_value[0] = out_value[1] = 0.5;
		break;
	case 2:
		offset = 4 * (sample_index * 2);
		val.idx = (eg_sample_locs_2x[0] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (eg_sample_locs_2x[0] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 4:
		offset = 4 * (sample_index * 2);
		val.idx = (eg_sample_locs_4x[0] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (eg_sample_locs_4x[0] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 8:
		offset = 4 * (sample_index % 4 * 2);
		index = (sample_index / 4);
		val.idx = (sample_locs_8x[index] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (sample_locs_8x[index] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	}
}

static void evergreen_emit_msaa_state(struct r600_context *rctx, int nr_samples, int ps_iter_samples)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	unsigned max_dist = 0;

	switch (nr_samples) {
	default:
		nr_samples = 0;
		break;
	case 2:
		radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, ARRAY_SIZE(eg_sample_locs_2x));
		radeon_emit_array(cs, eg_sample_locs_2x, ARRAY_SIZE(eg_sample_locs_2x));
		max_dist = eg_max_dist_2x;
		break;
	case 4:
		radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, ARRAY_SIZE(eg_sample_locs_4x));
		radeon_emit_array(cs, eg_sample_locs_4x, ARRAY_SIZE(eg_sample_locs_4x));
		max_dist = eg_max_dist_4x;
		break;
	case 8:
		radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, ARRAY_SIZE(sample_locs_8x));
		radeon_emit_array(cs, sample_locs_8x, ARRAY_SIZE(sample_locs_8x));
		max_dist = max_dist_8x;
		break;
	}

	if (nr_samples > 1) {
		radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
		radeon_emit(cs, S_028C00_LAST_PIXEL(1) |
				S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */
		radeon_emit(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) |
				S_028C04_MAX_SAMPLE_DIST(max_dist)); /* R_028C04_PA_SC_AA_CONFIG */
		radeon_set_context_reg(cs, R_028A4C_PA_SC_MODE_CNTL_1,
				       EG_S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1) |
				       EG_S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
				       EG_S_028A4C_FORCE_EOV_REZ_ENABLE(1));
	} else {
		radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
		radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */
		radeon_emit(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */
		radeon_set_context_reg(cs, R_028A4C_PA_SC_MODE_CNTL_1,
				       EG_S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
				       EG_S_028A4C_FORCE_EOV_REZ_ENABLE(1));
	}
}

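/* Note on the image/RAT emission below: each bound image is described to
 * the hardware in three forms per slot: the CB_COLOR* register block (so
 * the RAT can be written like a color target), an "immediate" buffer at
 * CB_IMMED*_BASE, and two SET_RESOURCE packets (immediate + real resource)
 * for shader-side access.  Every GPU address write is followed by a NOP
 * packet carrying the relocation index for the command-stream checker.
 */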
static void evergreen_emit_image_state(struct r600_context *rctx, struct r600_atom *atom,
				       int immed_id_base, int res_id_base, int offset, uint32_t pkt_flags)
{
	struct r600_image_state *state = (struct r600_image_state *)atom;
	struct pipe_framebuffer_state *fb_state = &rctx->framebuffer.state;
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_texture *rtex;
	struct r600_resource *resource;
	int i;

	for (i = 0; i < R600_MAX_IMAGES; i++) {
		struct r600_image_view *image = &state->views[i];
		unsigned reloc, immed_reloc;
		int idx = i + offset;

		if (!pkt_flags)
			idx += fb_state->nr_cbufs + (rctx->dual_src_blend ? 1 : 0);
		if (!image->base.resource)
			continue;

		resource = (struct r600_resource *)image->base.resource;
		if (resource->b.b.target != PIPE_BUFFER)
			rtex = (struct r600_texture *)image->base.resource;
		else
			rtex = NULL;

		reloc = radeon_add_to_buffer_list(&rctx->b,
						  &rctx->b.gfx,
						  resource,
						  RADEON_USAGE_READWRITE,
						  RADEON_PRIO_SHADER_RW_BUFFER);

		immed_reloc = radeon_add_to_buffer_list(&rctx->b,
							&rctx->b.gfx,
							resource->immed_buffer,
							RADEON_USAGE_READWRITE,
							RADEON_PRIO_SHADER_RW_BUFFER);

		if (pkt_flags)
			radeon_compute_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + idx * 0x3C, 13);
		else
			radeon_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + idx * 0x3C, 13);

		radeon_emit(cs, image->cb_color_base);		/* R_028C60_CB_COLOR0_BASE */
		radeon_emit(cs, image->cb_color_pitch);		/* R_028C64_CB_COLOR0_PITCH */
		radeon_emit(cs, image->cb_color_slice);		/* R_028C68_CB_COLOR0_SLICE */
		radeon_emit(cs, image->cb_color_view);		/* R_028C6C_CB_COLOR0_VIEW */
		radeon_emit(cs, image->cb_color_info);		/* R_028C70_CB_COLOR0_INFO */
		radeon_emit(cs, image->cb_color_attrib);	/* R_028C74_CB_COLOR0_ATTRIB */
		radeon_emit(cs, image->cb_color_dim);		/* R_028C78_CB_COLOR0_DIM */
		radeon_emit(cs, rtex ? rtex->cmask.base_address_reg : image->cb_color_base);	/* R_028C7C_CB_COLOR0_CMASK */
		radeon_emit(cs, rtex ? rtex->cmask.slice_tile_max : 0);	/* R_028C80_CB_COLOR0_CMASK_SLICE */
		radeon_emit(cs, image->cb_color_fmask);		/* R_028C84_CB_COLOR0_FMASK */
		radeon_emit(cs, image->cb_color_fmask_slice);	/* R_028C88_CB_COLOR0_FMASK_SLICE */
		radeon_emit(cs, rtex ? rtex->color_clear_value[0] : 0);	/* R_028C8C_CB_COLOR0_CLEAR_WORD0 */
		radeon_emit(cs, rtex ? rtex->color_clear_value[1] : 0);	/* R_028C90_CB_COLOR0_CLEAR_WORD1 */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
		radeon_emit(cs, reloc);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
		radeon_emit(cs, reloc);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C7C_CB_COLOR0_CMASK */
		radeon_emit(cs, reloc);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C84_CB_COLOR0_FMASK */
		radeon_emit(cs, reloc);

		if (pkt_flags)
			radeon_compute_set_context_reg(cs, R_028B9C_CB_IMMED0_BASE + (idx * 4), resource->immed_buffer->gpu_address >> 8);
		else
			radeon_set_context_reg(cs, R_028B9C_CB_IMMED0_BASE + (idx * 4), resource->immed_buffer->gpu_address >> 8);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028B9C_CB_IMMED0_BASE */
		radeon_emit(cs, immed_reloc);

		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
		radeon_emit(cs, (immed_id_base + i + offset) * 8);
		radeon_emit_array(cs, image->immed_resource_words, 8);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
		radeon_emit(cs, immed_reloc);

		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
		radeon_emit(cs, (res_id_base + i + offset) * 8);
		radeon_emit_array(cs, image->resource_words, 8);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
		radeon_emit(cs, reloc);

		if (!image->skip_mip_address_reloc) {
			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
			radeon_emit(cs, reloc);
		}
	}
}

static void evergreen_emit_fragment_image_state(struct r600_context *rctx, struct r600_atom *atom)
{
	evergreen_emit_image_state(rctx, atom,
				   R600_IMAGE_IMMED_RESOURCE_OFFSET,
				   R600_IMAGE_REAL_RESOURCE_OFFSET, 0, 0);
}

static void evergreen_emit_compute_image_state(struct r600_context *rctx, struct r600_atom *atom)
{
	evergreen_emit_image_state(rctx, atom,
				   EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_IMMED_RESOURCE_OFFSET,
				   EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_REAL_RESOURCE_OFFSET,
				   0, RADEON_CP_PACKET3_COMPUTE_MODE);
}

static void evergreen_emit_fragment_buffer_state(struct r600_context *rctx, struct r600_atom *atom)
{
	int offset = util_bitcount(rctx->fragment_images.enabled_mask);
	evergreen_emit_image_state(rctx, atom,
				   R600_IMAGE_IMMED_RESOURCE_OFFSET,
				   R600_IMAGE_REAL_RESOURCE_OFFSET, offset, 0);
}

static void evergreen_emit_compute_buffer_state(struct r600_context *rctx, struct r600_atom *atom)
{
	int offset = util_bitcount(rctx->compute_images.enabled_mask);
	evergreen_emit_image_state(rctx, atom,
				   EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_IMMED_RESOURCE_OFFSET,
				   EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_REAL_RESOURCE_OFFSET,
				   offset, RADEON_CP_PACKET3_COMPUTE_MODE);
}

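/* Note on the framebuffer emission below: each bound colorbuffer takes a
 * 13-dword CB_COLOR* register sequence plus NOP relocations for its base,
 * attrib, CMASK and FMASK addresses.  Unbound slots get COLOR_INVALID in
 * their INFO register, and leftover slots (after dual-src blending and
 * image/buffer RATs are accounted for) get INFO cleared to 0.
 */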
static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct pipe_framebuffer_state *state = &rctx->framebuffer.state;
	unsigned nr_cbufs = state->nr_cbufs;
	unsigned i, tl, br;
	struct r600_texture *tex = NULL;
	struct r600_surface *cb = NULL;

	/* XXX support more colorbuffers once we need them */
	assert(nr_cbufs <= 8);
	if (nr_cbufs > 8)
		nr_cbufs = 8;

	/* Colorbuffers. */
	for (i = 0; i < nr_cbufs; i++) {
		unsigned reloc, cmask_reloc;

		cb = (struct r600_surface*)state->cbufs[i];
		if (!cb) {
			radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
					       S_028C70_FORMAT(V_028C70_COLOR_INVALID));
			continue;
		}

		tex = (struct r600_texture *)cb->base.texture;
		reloc = radeon_add_to_buffer_list(&rctx->b,
						  &rctx->b.gfx,
						  (struct r600_resource*)cb->base.texture,
						  RADEON_USAGE_READWRITE,
						  tex->resource.b.b.nr_samples > 1 ?
							  RADEON_PRIO_COLOR_BUFFER_MSAA :
							  RADEON_PRIO_COLOR_BUFFER);

		if (tex->cmask_buffer && tex->cmask_buffer != &tex->resource) {
			cmask_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
								tex->cmask_buffer, RADEON_USAGE_READWRITE,
								RADEON_PRIO_CMASK);
		} else {
			cmask_reloc = reloc;
		}

		radeon_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 13);
		radeon_emit(cs, cb->cb_color_base);	/* R_028C60_CB_COLOR0_BASE */
		radeon_emit(cs, cb->cb_color_pitch);	/* R_028C64_CB_COLOR0_PITCH */
		radeon_emit(cs, cb->cb_color_slice);	/* R_028C68_CB_COLOR0_SLICE */
		radeon_emit(cs, cb->cb_color_view);	/* R_028C6C_CB_COLOR0_VIEW */
		radeon_emit(cs, cb->cb_color_info | tex->cb_color_info);	/* R_028C70_CB_COLOR0_INFO */
		radeon_emit(cs, cb->cb_color_attrib);	/* R_028C74_CB_COLOR0_ATTRIB */
		radeon_emit(cs, cb->cb_color_dim);	/* R_028C78_CB_COLOR0_DIM */
		radeon_emit(cs, tex->cmask.base_address_reg);	/* R_028C7C_CB_COLOR0_CMASK */
		radeon_emit(cs, tex->cmask.slice_tile_max);	/* R_028C80_CB_COLOR0_CMASK_SLICE */
		radeon_emit(cs, cb->cb_color_fmask);	/* R_028C84_CB_COLOR0_FMASK */
		radeon_emit(cs, cb->cb_color_fmask_slice);	/* R_028C88_CB_COLOR0_FMASK_SLICE */
		radeon_emit(cs, tex->color_clear_value[0]);	/* R_028C8C_CB_COLOR0_CLEAR_WORD0 */
		radeon_emit(cs, tex->color_clear_value[1]);	/* R_028C90_CB_COLOR0_CLEAR_WORD1 */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
		radeon_emit(cs, reloc);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
		radeon_emit(cs, reloc);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C7C_CB_COLOR0_CMASK */
		radeon_emit(cs, cmask_reloc);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C84_CB_COLOR0_FMASK */
		radeon_emit(cs, reloc);
	}
	/* set CB_COLOR1_INFO for possible dual-src blending */
	if (rctx->framebuffer.dual_src_blend && i == 1 && state->cbufs[0]) {
		radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + 1 * 0x3C,
				       cb->cb_color_info | tex->cb_color_info);
		i++;
	}
	i += util_bitcount(rctx->fragment_images.enabled_mask);
	i += util_bitcount(rctx->fragment_buffers.enabled_mask);
	for (; i < 8; i++)
		radeon_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C, 0);
	for (; i < 12; i++)
		radeon_set_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C, 0);

	/* ZS buffer. */
	if (state->zsbuf) {
		struct r600_surface *zb = (struct r600_surface*)state->zsbuf;
		unsigned reloc = radeon_add_to_buffer_list(&rctx->b,
							   &rctx->b.gfx,
							   (struct r600_resource*)state->zsbuf->texture,
							   RADEON_USAGE_READWRITE,
							   zb->base.texture->nr_samples > 1 ?
								   RADEON_PRIO_DEPTH_BUFFER_MSAA :
								   RADEON_PRIO_DEPTH_BUFFER);

		radeon_set_context_reg(cs, R_028008_DB_DEPTH_VIEW, zb->db_depth_view);

		radeon_set_context_reg_seq(cs, R_028040_DB_Z_INFO, 8);
		radeon_emit(cs, zb->db_z_info);		/* R_028040_DB_Z_INFO */
		radeon_emit(cs, zb->db_stencil_info);	/* R_028044_DB_STENCIL_INFO */
		radeon_emit(cs, zb->db_depth_base);	/* R_028048_DB_Z_READ_BASE */
		radeon_emit(cs, zb->db_stencil_base);	/* R_02804C_DB_STENCIL_READ_BASE */
		radeon_emit(cs, zb->db_depth_base);	/* R_028050_DB_Z_WRITE_BASE */
		radeon_emit(cs, zb->db_stencil_base);	/* R_028054_DB_STENCIL_WRITE_BASE */
		radeon_emit(cs, zb->db_depth_size);	/* R_028058_DB_DEPTH_SIZE */
		radeon_emit(cs, zb->db_depth_slice);	/* R_02805C_DB_DEPTH_SLICE */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028048_DB_Z_READ_BASE */
		radeon_emit(cs, reloc);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_02804C_DB_STENCIL_READ_BASE */
		radeon_emit(cs, reloc);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028050_DB_Z_WRITE_BASE */
		radeon_emit(cs, reloc);

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028054_DB_STENCIL_WRITE_BASE */
		radeon_emit(cs, reloc);
	} else if (rctx->screen->b.info.drm_minor >= 18) {
		/* DRM 2.6.18 allows the INVALID format to disable depth/stencil.
		 * Older kernels are out of luck. */
		radeon_set_context_reg_seq(cs, R_028040_DB_Z_INFO, 2);
		radeon_emit(cs, S_028040_FORMAT(V_028040_Z_INVALID));		/* R_028040_DB_Z_INFO */
		radeon_emit(cs, S_028044_FORMAT(V_028044_STENCIL_INVALID));	/* R_028044_DB_STENCIL_INFO */
	}

	/* Framebuffer dimensions. */
	evergreen_get_scissor_rect(rctx, 0, 0, state->width, state->height, &tl, &br);

	radeon_set_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2);
	radeon_emit(cs, tl); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */
	radeon_emit(cs, br); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */

	if (rctx->b.chip_class == EVERGREEN) {
		evergreen_emit_msaa_state(rctx, rctx->framebuffer.nr_samples, rctx->ps_iter_samples);
	} else {
		unsigned sc_mode_cntl_1 =
			EG_S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
			EG_S_028A4C_FORCE_EOV_REZ_ENABLE(1);

		if (rctx->framebuffer.nr_samples > 1)
			cayman_emit_msaa_sample_locs(cs, rctx->framebuffer.nr_samples);
		cayman_emit_msaa_config(cs, rctx->framebuffer.nr_samples,
					rctx->ps_iter_samples, 0, sc_mode_cntl_1);
	}
}

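/* Note on the polygon offset emission below: for unorm depth formats the
 * hardware appears to expect offset_units pre-scaled by the format's
 * precision (x2 for 24-bit, x4 for 16-bit), while float depth instead
 * takes the DB_IS_FLOAT path with a 23-bit mantissa exponent.
 */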
static void evergreen_emit_polygon_offset(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_poly_offset_state *state = (struct r600_poly_offset_state*)a;
	float offset_units = state->offset_units;
	float offset_scale = state->offset_scale;
	uint32_t pa_su_poly_offset_db_fmt_cntl = 0;

	if (!state->offset_units_unscaled) {
		switch (state->zs_format) {
		case PIPE_FORMAT_Z24X8_UNORM:
		case PIPE_FORMAT_Z24_UNORM_S8_UINT:
		case PIPE_FORMAT_X8Z24_UNORM:
		case PIPE_FORMAT_S8_UINT_Z24_UNORM:
			offset_units *= 2.0f;
			pa_su_poly_offset_db_fmt_cntl =
				S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS((char)-24);
			break;
		case PIPE_FORMAT_Z16_UNORM:
			offset_units *= 4.0f;
			pa_su_poly_offset_db_fmt_cntl =
				S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS((char)-16);
			break;
		default:
			pa_su_poly_offset_db_fmt_cntl =
				S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS((char)-23) |
				S_028B78_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
		}
	}

	radeon_set_context_reg_seq(cs, R_028B80_PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
	radeon_emit(cs, fui(offset_scale)); /* R_028B80_PA_SU_POLY_OFFSET_FRONT_SCALE */
	radeon_emit(cs, fui(offset_units)); /* R_028B84_PA_SU_POLY_OFFSET_FRONT_OFFSET */
	radeon_emit(cs, fui(offset_scale)); /* R_028B88_PA_SU_POLY_OFFSET_BACK_SCALE */
	radeon_emit(cs, fui(offset_units)); /* R_028B8C_PA_SU_POLY_OFFSET_BACK_OFFSET */

	radeon_set_context_reg(cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
			       pa_su_poly_offset_db_fmt_cntl);
}

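/* The RAT write-enable bits sit above the colorbuffer bits in
 * CB_TARGET_MASK, 4 bits per target.  Illustrative example: with image
 * RATs 0-1 and one buffer RAT enabled, base_mask below covers three
 * nibbles (0xfff); with nr_cbufs == 1 the final shift yields 0xfff0.
 */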
uint32_t evergreen_construct_rat_mask(struct r600_context *rctx, struct r600_cb_misc_state *a,
				      unsigned nr_cbufs)
{
	unsigned base_mask = 0;
	unsigned dirty_mask = a->image_rat_enabled_mask;
	while (dirty_mask) {
		unsigned idx = u_bit_scan(&dirty_mask);
		base_mask |= (0xf << (idx * 4));
	}
	unsigned offset = util_last_bit(a->image_rat_enabled_mask);
	dirty_mask = a->buffer_rat_enabled_mask;
	while (dirty_mask) {
		unsigned idx = u_bit_scan(&dirty_mask);
		base_mask |= (0xf << (idx + offset) * 4);
	}
	return base_mask << (nr_cbufs * 4);
}

static void evergreen_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_cb_misc_state *a = (struct r600_cb_misc_state*)atom;
	unsigned fb_colormask = (1ULL << ((unsigned)a->nr_cbufs * 4)) - 1;
	unsigned ps_colormask = (1ULL << ((unsigned)a->nr_ps_color_outputs * 4)) - 1;
	unsigned rat_colormask = evergreen_construct_rat_mask(rctx, a, a->nr_cbufs);

	radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
	radeon_emit(cs, (a->blend_colormask & fb_colormask) | rat_colormask); /* R_028238_CB_TARGET_MASK */
	/* This must match the used export instructions exactly.
	 * Other values may lead to undefined behavior and hangs.
	 */
	radeon_emit(cs, ps_colormask); /* R_02823C_CB_SHADER_MASK */
}

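/* Note on the DB state emission below: when the bound depth surface
 * carries HTILE metadata, the fast-clear value, the HTILE surface/preload
 * controls and the HTILE base address are programmed (with a relocation
 * for the HTILE buffer); otherwise HTILE is disabled by writing zeros.
 */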
static void evergreen_emit_db_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_db_state *a = (struct r600_db_state*)atom;

	if (a->rsurf && a->rsurf->db_htile_surface) {
		struct r600_texture *rtex = (struct r600_texture *)a->rsurf->base.texture;
		unsigned reloc_idx;

		radeon_set_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
		radeon_set_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
		radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, a->rsurf->db_preload_control);
		radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
		reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, &rtex->resource,
						      RADEON_USAGE_READWRITE, RADEON_PRIO_HTILE);
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, reloc_idx);
	} else {
		radeon_set_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, 0);
		radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, 0);
	}
}

static void evergreen_emit_db_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_db_misc_state *a = (struct r600_db_misc_state*)atom;
	unsigned db_render_control = 0;
	unsigned db_count_control = 0;
	unsigned db_render_override =
		S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
		S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE);

	if (rctx->b.num_occlusion_queries > 0 &&
	    !a->occlusion_queries_disabled) {
		db_count_control |= S_028004_PERFECT_ZPASS_COUNTS(1);
		if (rctx->b.chip_class == CAYMAN) {
			db_count_control |= S_028004_SAMPLE_RATE(a->log_samples);
		}
		db_render_override |= S_02800C_NOOP_CULL_DISABLE(1);
	} else {
		db_count_control |= S_028004_ZPASS_INCREMENT_DISABLE(1);
	}

	/* This fixes a lockup when hyperz and alpha test are enabled at the
	 * same time: the GPU apparently gets confused about which order to
	 * use for the Z test.
	 */
	if (rctx->alphatest_state.sx_alpha_test_control)
		db_render_override |= S_02800C_FORCE_SHADER_Z_ORDER(1);

	if (a->flush_depthstencil_through_cb) {
		assert(a->copy_depth || a->copy_stencil);

		db_render_control |= S_028000_DEPTH_COPY_ENABLE(a->copy_depth) |
				     S_028000_STENCIL_COPY_ENABLE(a->copy_stencil) |
				     S_028000_COPY_CENTROID(1) |
				     S_028000_COPY_SAMPLE(a->copy_sample);
	} else if (a->flush_depth_inplace || a->flush_stencil_inplace) {
		db_render_control |= S_028000_DEPTH_COMPRESS_DISABLE(a->flush_depth_inplace) |
				     S_028000_STENCIL_COMPRESS_DISABLE(a->flush_stencil_inplace);
		db_render_override |= S_02800C_DISABLE_PIXEL_RATE_TILES(1);
	}
	if (a->htile_clear) {
		/* FIXME we might want to disable cliprect here */
		db_render_control |= S_028000_DEPTH_CLEAR_ENABLE(1);
	}

	radeon_set_context_reg_seq(cs, R_028000_DB_RENDER_CONTROL, 2);
	radeon_emit(cs, db_render_control); /* R_028000_DB_RENDER_CONTROL */
	radeon_emit(cs, db_count_control);  /* R_028004_DB_COUNT_CONTROL */
	radeon_set_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override);
	radeon_set_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control);
}

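/* Note on the vertex buffer emission below: each buffer is described to
 * the fetch unit as an 8-dword resource (base address, size - 1,
 * stride/endian-swap/address-high bits, destination swizzles).  The final
 * 0xc0000000 dword appears to set the resource type bits (valid buffer),
 * matching SQ_TEX_VTX_VALID_BUFFER in the constant-buffer path further
 * down.
 */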
static void evergreen_emit_vertex_buffers(struct r600_context *rctx,
					  struct r600_vertexbuf_state *state,
					  unsigned resource_offset,
					  unsigned pkt_flags)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	uint32_t dirty_mask = state->dirty_mask;

	while (dirty_mask) {
		struct pipe_vertex_buffer *vb;
		struct r600_resource *rbuffer;
		uint64_t va;
		unsigned buffer_index = u_bit_scan(&dirty_mask);

		vb = &state->vb[buffer_index];
		rbuffer = (struct r600_resource*)vb->buffer.resource;
		assert(rbuffer);

		va = rbuffer->gpu_address + vb->buffer_offset;

		/* fetch resources start at index 992 */
		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
		radeon_emit(cs, (resource_offset + buffer_index) * 8);
		radeon_emit(cs, va); /* RESOURCEi_WORD0 */
		radeon_emit(cs, rbuffer->b.b.width0 - vb->buffer_offset - 1); /* RESOURCEi_WORD1 */
		radeon_emit(cs, /* RESOURCEi_WORD2 */
			    S_030008_ENDIAN_SWAP(r600_endian_swap(32)) |
			    S_030008_STRIDE(vb->stride) |
			    S_030008_BASE_ADDRESS_HI(va >> 32UL));
		radeon_emit(cs, /* RESOURCEi_WORD3 */
			    S_03000C_DST_SEL_X(V_03000C_SQ_SEL_X) |
			    S_03000C_DST_SEL_Y(V_03000C_SQ_SEL_Y) |
			    S_03000C_DST_SEL_Z(V_03000C_SQ_SEL_Z) |
			    S_03000C_DST_SEL_W(V_03000C_SQ_SEL_W));
		radeon_emit(cs, 0); /* RESOURCEi_WORD4 */
		radeon_emit(cs, 0); /* RESOURCEi_WORD5 */
		radeon_emit(cs, 0); /* RESOURCEi_WORD6 */
		radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD7 */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
							  RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER));
	}
	state->dirty_mask = 0;
}

static void evergreen_fs_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom * atom)
{
	evergreen_emit_vertex_buffers(rctx, &rctx->vertex_buffer_state, EG_FETCH_CONSTANTS_OFFSET_FS, 0);
}

static void evergreen_cs_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom * atom)
{
	evergreen_emit_vertex_buffers(rctx, &rctx->cs_vertex_buffer_state, EG_FETCH_CONSTANTS_OFFSET_CS,
				      RADEON_CP_PACKET3_COMPUTE_MODE);
}

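/* Note on the constant buffer emission below: each buffer is bound twice.
 * The first binding goes through the ALU const cache registers (base
 * address and size, the latter in 256-byte granules, hence the
 * DIV_ROUND_UP by 256) for direct ALU constant reads; the second is a
 * fetch resource with a 16-byte stride for indexed reads.  The GS ring
 * pseudo-buffer instead uses a 4-byte stride and is marked uncached.
 */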
static void evergreen_emit_constant_buffers(struct r600_context *rctx,
					    struct r600_constbuf_state *state,
					    unsigned buffer_id_base,
					    unsigned reg_alu_constbuf_size,
					    unsigned reg_alu_const_cache,
					    unsigned pkt_flags)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	uint32_t dirty_mask = state->dirty_mask;

	while (dirty_mask) {
		struct pipe_constant_buffer *cb;
		struct r600_resource *rbuffer;
		uint64_t va;
		unsigned buffer_index = ffs(dirty_mask) - 1;
		unsigned gs_ring_buffer = (buffer_index == R600_GS_RING_CONST_BUFFER);

		cb = &state->cb[buffer_index];
		rbuffer = (struct r600_resource*)cb->buffer;
		assert(rbuffer);

		va = rbuffer->gpu_address + cb->buffer_offset;

		if (buffer_index < R600_MAX_HW_CONST_BUFFERS) {
			radeon_set_context_reg_flag(cs, reg_alu_constbuf_size + buffer_index * 4,
						    DIV_ROUND_UP(cb->buffer_size, 256), pkt_flags);
			radeon_set_context_reg_flag(cs, reg_alu_const_cache + buffer_index * 4, va >> 8,
						    pkt_flags);
			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
			radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
								  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));
		}

		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
		radeon_emit(cs, (buffer_id_base + buffer_index) * 8);
		radeon_emit(cs, va); /* RESOURCEi_WORD0 */
		radeon_emit(cs, rbuffer->b.b.width0 - cb->buffer_offset - 1); /* RESOURCEi_WORD1 */
		radeon_emit(cs, /* RESOURCEi_WORD2 */
			    S_030008_ENDIAN_SWAP(gs_ring_buffer ? ENDIAN_NONE : r600_endian_swap(32)) |
			    S_030008_STRIDE(gs_ring_buffer ? 4 : 16) |
			    S_030008_BASE_ADDRESS_HI(va >> 32UL) |
			    S_030008_DATA_FORMAT(FMT_32_32_32_32_FLOAT));
		radeon_emit(cs, /* RESOURCEi_WORD3 */
			    S_03000C_UNCACHED(gs_ring_buffer ? 1 : 0) |
			    S_03000C_DST_SEL_X(V_03000C_SQ_SEL_X) |
			    S_03000C_DST_SEL_Y(V_03000C_SQ_SEL_Y) |
			    S_03000C_DST_SEL_Z(V_03000C_SQ_SEL_Z) |
			    S_03000C_DST_SEL_W(V_03000C_SQ_SEL_W));
		radeon_emit(cs, 0); /* RESOURCEi_WORD4 */
		radeon_emit(cs, 0); /* RESOURCEi_WORD5 */
		radeon_emit(cs, 0); /* RESOURCEi_WORD6 */
		radeon_emit(cs, /* RESOURCEi_WORD7 */
			    S_03001C_TYPE(V_03001C_SQ_TEX_VTX_VALID_BUFFER));

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
							  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));

		dirty_mask &= ~(1 << buffer_index);
	}
	state->dirty_mask = 0;
}

/* VS constants can be in VS/ES (same space) or LS if tess is enabled. */
static void evergreen_emit_vs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
{
	if (rctx->vs_shader->current->shader.vs_as_ls) {
		evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX],
						EG_FETCH_CONSTANTS_OFFSET_LS,
						R_028FC0_ALU_CONST_BUFFER_SIZE_LS_0,
						R_028F40_ALU_CONST_CACHE_LS_0,
						0 /* PKT3 flags */);
	} else {
		evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX],
						EG_FETCH_CONSTANTS_OFFSET_VS,
						R_028180_ALU_CONST_BUFFER_SIZE_VS_0,
						R_028980_ALU_CONST_CACHE_VS_0,
						0 /* PKT3 flags */);
	}
}

static void evergreen_emit_gs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
{
	evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY],
					EG_FETCH_CONSTANTS_OFFSET_GS,
					R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0,
					R_0289C0_ALU_CONST_CACHE_GS_0,
					0 /* PKT3 flags */);
}

static void evergreen_emit_ps_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
{
	evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT],
					EG_FETCH_CONSTANTS_OFFSET_PS,
					R_028140_ALU_CONST_BUFFER_SIZE_PS_0,
					R_028940_ALU_CONST_CACHE_PS_0,
					0 /* PKT3 flags */);
}

static void evergreen_emit_cs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
{
	evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_COMPUTE],
					EG_FETCH_CONSTANTS_OFFSET_CS,
					R_028FC0_ALU_CONST_BUFFER_SIZE_LS_0,
					R_028F40_ALU_CONST_CACHE_LS_0,
					RADEON_CP_PACKET3_COMPUTE_MODE);
}

/* TES constants can be emitted to VS or ES, which share a common space. */
static void evergreen_emit_tes_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
{
	if (!rctx->tes_shader)
		return;
	evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_TESS_EVAL],
					EG_FETCH_CONSTANTS_OFFSET_VS,
					R_028180_ALU_CONST_BUFFER_SIZE_VS_0,
					R_028980_ALU_CONST_CACHE_VS_0,
					0);
}

static void evergreen_emit_tcs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom)
{
	if (!rctx->tes_shader)
		return;
	evergreen_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_TESS_CTRL],
					EG_FETCH_CONSTANTS_OFFSET_HS,
					R_028F80_ALU_CONST_BUFFER_SIZE_HS_0,
					R_028F00_ALU_CONST_CACHE_HS_0,
					0);
}

static void evergreen_emit_sampler_views(struct r600_context *rctx,
					 struct r600_samplerview_state *state,
					 unsigned resource_id_base, unsigned pkt_flags)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	uint32_t dirty_mask = state->dirty_mask;

	while (dirty_mask) {
		struct r600_pipe_sampler_view *rview;
		unsigned resource_index = u_bit_scan(&dirty_mask);
		unsigned reloc;

		rview = state->views[resource_index];
		assert(rview);

		radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
		radeon_emit(cs, (resource_id_base + resource_index) * 8);
		radeon_emit_array(cs, rview->tex_resource_words, 8);

		reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rview->tex_resource,
						  RADEON_USAGE_READ,
						  r600_get_sampler_view_priority(rview->tex_resource));
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
		radeon_emit(cs, reloc);

		if (!rview->skip_mip_address_reloc) {
			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
			radeon_emit(cs, reloc);
		}
	}
	state->dirty_mask = 0;
}

static void evergreen_emit_vs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
{
	if (rctx->vs_shader->current->shader.vs_as_ls) {
		evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views,
					     EG_FETCH_CONSTANTS_OFFSET_LS + R600_MAX_CONST_BUFFERS, 0);
	} else {
		evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views,
					     EG_FETCH_CONSTANTS_OFFSET_VS + R600_MAX_CONST_BUFFERS, 0);
	}
}

static void evergreen_emit_gs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
{
	evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views,
				     EG_FETCH_CONSTANTS_OFFSET_GS + R600_MAX_CONST_BUFFERS, 0);
}

static void evergreen_emit_tcs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
{
	evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_TESS_CTRL].views,
				     EG_FETCH_CONSTANTS_OFFSET_HS + R600_MAX_CONST_BUFFERS, 0);
}

static void evergreen_emit_tes_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
{
	if (!rctx->tes_shader)
		return;
	evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_TESS_EVAL].views,
				     EG_FETCH_CONSTANTS_OFFSET_VS + R600_MAX_CONST_BUFFERS, 0);
}

static void evergreen_emit_ps_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
{
	evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views,
				     EG_FETCH_CONSTANTS_OFFSET_PS + R600_MAX_CONST_BUFFERS, 0);
}

static void evergreen_emit_cs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
{
	evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].views,
				     EG_FETCH_CONSTANTS_OFFSET_CS + R600_MAX_CONST_BUFFERS, RADEON_CP_PACKET3_COMPUTE_MODE);
}

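/* Note on the sampler state emission below: each sampler is 3 dwords,
 * uploaded with SET_SAMPLER.  Border colors don't fit in those dwords;
 * they are written through a per-stage config register pair: an index
 * register followed by the four border color dwords.
 */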
static void evergreen_emit_sampler_states(struct r600_context *rctx,
					  struct r600_textures_info *texinfo,
					  unsigned resource_id_base,
					  unsigned border_index_reg,
					  unsigned pkt_flags)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	uint32_t dirty_mask = texinfo->states.dirty_mask;

	while (dirty_mask) {
		struct r600_pipe_sampler_state *rstate;
		unsigned i = u_bit_scan(&dirty_mask);

		rstate = texinfo->states.states[i];
		assert(rstate);

		radeon_emit(cs, PKT3(PKT3_SET_SAMPLER, 3, 0) | pkt_flags);
		radeon_emit(cs, (resource_id_base + i) * 3);
		radeon_emit_array(cs, rstate->tex_sampler_words, 3);

		if (rstate->border_color_use) {
			radeon_set_config_reg_seq(cs, border_index_reg, 5);
			radeon_emit(cs, i);
			radeon_emit_array(cs, rstate->border_color.ui, 4);
		}
	}
	texinfo->states.dirty_mask = 0;
}

static void evergreen_emit_vs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
{
	if (rctx->vs_shader->current->shader.vs_as_ls) {
		evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_VERTEX], 72,
					      R_00A450_TD_LS_SAMPLER0_BORDER_COLOR_INDEX, 0);
	} else {
		evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_VERTEX], 18,
					      R_00A414_TD_VS_SAMPLER0_BORDER_INDEX, 0);
	}
}

static void evergreen_emit_gs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
{
	evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY], 36,
				      R_00A428_TD_GS_SAMPLER0_BORDER_INDEX, 0);
}

static void evergreen_emit_tcs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
{
	evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_TESS_CTRL], 54,
				      R_00A43C_TD_HS_SAMPLER0_BORDER_COLOR_INDEX, 0);
}

static void evergreen_emit_tes_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
{
	if (!rctx->tes_shader)
		return;
	evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_TESS_EVAL], 18,
				      R_00A414_TD_VS_SAMPLER0_BORDER_INDEX, 0);
}

static void evergreen_emit_ps_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
{
	evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT], 0,
				      R_00A400_TD_PS_SAMPLER0_BORDER_INDEX, 0);
}

static void evergreen_emit_cs_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
{
	evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE], 90,
				      R_00A464_TD_CS_SAMPLER0_BORDER_INDEX,
				      RADEON_CP_PACKET3_COMPUTE_MODE);
}

static void evergreen_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a)
{
	struct r600_sample_mask *s = (struct r600_sample_mask*)a;
	uint8_t mask = s->sample_mask;

	radeon_set_context_reg(rctx->b.gfx.cs, R_028C3C_PA_SC_AA_MASK,
			       mask | (mask << 8) | (mask << 16) | (mask << 24));
}

static void cayman_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a)
{
	struct r600_sample_mask *s = (struct r600_sample_mask*)a;
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	uint16_t mask = s->sample_mask;

	radeon_set_context_reg_seq(cs, CM_R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
	radeon_emit(cs, mask | (mask << 16)); /* X0Y0_X1Y0 */
	radeon_emit(cs, mask | (mask << 16)); /* X0Y1_X1Y1 */
}

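/* Note on the fetch shader emission below: the fetch shader runs as the
 * FS stage and does the actual vertex fetching, so only its start address
 * needs to be programmed (256-byte aligned, hence the >> 8), plus a NOP
 * relocation for the shader buffer object.
 */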
static void evergreen_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_cso_state *state = (struct r600_cso_state*)a;
	struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso;

	if (!shader)
		return;

	radeon_set_context_reg(cs, R_0288A4_SQ_PGM_START_FS,
			       (shader->buffer->gpu_address + shader->offset) >> 8);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->buffer,
						  RADEON_USAGE_READ,
						  RADEON_PRIO_SHADER_BINARY));
}

static void evergreen_emit_shader_stages(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_shader_stages_state *state = (struct r600_shader_stages_state*)a;
	uint32_t v = 0, v2 = 0, primid = 0, tf_param = 0;

	if (rctx->vs_shader->current->shader.vs_as_gs_a) {
		v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
		primid = 1;
	}

	if (state->geom_enable) {
		uint32_t cut_val;

		if (rctx->gs_shader->gs_max_out_vertices <= 128)
			cut_val = V_028A40_GS_CUT_128;
		else if (rctx->gs_shader->gs_max_out_vertices <= 256)
			cut_val = V_028A40_GS_CUT_256;
		else if (rctx->gs_shader->gs_max_out_vertices <= 512)
			cut_val = V_028A40_GS_CUT_512;
		else
			cut_val = V_028A40_GS_CUT_1024;

		v = S_028B54_GS_EN(1) |
		    S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
		if (!rctx->tes_shader)
			v |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL);

		v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
		     S_028A40_CUT_MODE(cut_val);

		if (rctx->gs_shader->current->shader.gs_prim_id_input)
			primid = 1;
	}

	if (rctx->tes_shader) {
		uint32_t type, partitioning, topology;
		struct tgsi_shader_info *info = &rctx->tes_shader->current->selector->info;
		unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
		unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
		bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
		bool tes_point_mode = info->properties[TGSI_PROPERTY_TES_POINT_MODE];

		switch (tes_prim_mode) {
		case PIPE_PRIM_LINES:
			type = V_028B6C_TESS_ISOLINE;
			break;
		case PIPE_PRIM_TRIANGLES:
			type = V_028B6C_TESS_TRIANGLE;
			break;
		case PIPE_PRIM_QUADS:
			type = V_028B6C_TESS_QUAD;
			break;
		default:
			assert(0);
			return;
		}

		switch (tes_spacing) {
		case PIPE_TESS_SPACING_FRACTIONAL_ODD:
			partitioning = V_028B6C_PART_FRAC_ODD;
			break;
		case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
			partitioning = V_028B6C_PART_FRAC_EVEN;
			break;
		case PIPE_TESS_SPACING_EQUAL:
			partitioning = V_028B6C_PART_INTEGER;
			break;
		default:
			assert(0);
			return;
		}

		if (tes_point_mode)
			topology = V_028B6C_OUTPUT_POINT;
		else if (tes_prim_mode == PIPE_PRIM_LINES)
			topology = V_028B6C_OUTPUT_LINE;
		else if (tes_vertex_order_cw)
			/* XXX follow radeonsi and invert */
			topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
		else
			topology = V_028B6C_OUTPUT_TRIANGLE_CW;

		tf_param = S_028B6C_TYPE(type) |
			   S_028B6C_PARTITIONING(partitioning) |
			   S_028B6C_TOPOLOGY(topology);
	}

	if (rctx->tes_shader) {
		v |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
		     S_028B54_HS_EN(1);
		if (!state->geom_enable)
			v |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
		else
			v |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS);
	}

	radeon_set_context_reg(cs, R_028AB8_VGT_VTX_CNT_EN, v ? 1 : 0);
	radeon_set_context_reg(cs, R_028B54_VGT_SHADER_STAGES_EN, v);
	radeon_set_context_reg(cs, R_028A40_VGT_GS_MODE, v2);
	radeon_set_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid);
	radeon_set_context_reg(cs, R_028B6C_VGT_TF_PARAM, tf_param);
}

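/* Note on the GS ring emission below: the ESGS/GSVS ring base and size
 * updates are bracketed by WAIT_3D_IDLE and a VGT_FLUSH, presumably
 * because the rings must not be reprogrammed while the VGT is still
 * using them.
 */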
static void evergreen_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a;
	struct r600_resource *rbuffer;

	radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
	radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));

	if (state->enable) {
		rbuffer = (struct r600_resource*)state->esgs_ring.buffer;
		radeon_set_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE,
				      rbuffer->gpu_address >> 8);
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
							  RADEON_USAGE_READWRITE,
							  RADEON_PRIO_SHADER_RINGS));
		radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
				      state->esgs_ring.buffer_size >> 8);

		rbuffer = (struct r600_resource*)state->gsvs_ring.buffer;
		radeon_set_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE,
				      rbuffer->gpu_address >> 8);
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
							  RADEON_USAGE_READWRITE,
							  RADEON_PRIO_SHADER_RINGS));
		radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE,
				      state->gsvs_ring.buffer_size >> 8);
	} else {
		radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0);
		radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0);
	}

	radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
	radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
}

void cayman_init_common_regs(struct r600_command_buffer *cb,
			     enum chip_class ctx_chip_class,
			     enum radeon_family ctx_family,
			     int ctx_drm_minor)
{
	r600_store_config_reg_seq(cb, R_008C00_SQ_CONFIG, 2);
	r600_store_value(cb, S_008C00_EXPORT_SRC_C(1)); /* R_008C00_SQ_CONFIG */
	/* always set the temp clauses */
	r600_store_value(cb, S_008C04_NUM_CLAUSE_TEMP_GPRS(4)); /* R_008C04_SQ_GPR_RESOURCE_MGMT_1 */

	r600_store_config_reg_seq(cb, R_008C10_SQ_GLOBAL_GPR_RESOURCE_MGMT_1, 2);
	r600_store_value(cb, 0); /* R_008C10_SQ_GLOBAL_GPR_RESOURCE_MGMT_1 */
	r600_store_value(cb, 0); /* R_008C14_SQ_GLOBAL_GPR_RESOURCE_MGMT_2 */

	r600_store_config_reg(cb, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, (1 << 8));

	r600_store_context_reg_seq(cb, R_028350_SX_MISC, 2);
	r600_store_value(cb, 0);
	r600_store_value(cb, S_028354_SURFACE_SYNC_MASK(0xf));

	r600_store_context_reg(cb, R_028800_DB_DEPTH_CONTROL, 0);
}

static void cayman_init_atom_start_cs(struct r600_context *rctx)
{
	struct r600_command_buffer *cb = &rctx->start_cs_cmd;
	int i;

	r600_init_command_buffer(cb, 338);

	/* This must be first. */
	r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
	r600_store_value(cb, 0x80000000);
	r600_store_value(cb, 0x80000000);

	/* We're setting config registers here. */
	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));

	/* This enables pipeline stat & streamout queries.
	 * They are only disabled by blits.
	 */
	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START) | EVENT_INDEX(0));

	cayman_init_common_regs(cb, rctx->b.chip_class,
				rctx->b.family, rctx->screen->b.info.drm_minor);

	r600_store_config_reg(cb, R_009100_SPI_CONFIG_CNTL, 0);
	r600_store_config_reg(cb, R_00913C_SPI_CONFIG_CNTL_1, S_00913C_VTX_DONE_DELAY(4));

	/* remove LS/HS from one SIMD for hw workaround */
	r600_store_config_reg_seq(cb, R_008E20_SQ_STATIC_THREAD_MGMT1, 3);
	r600_store_value(cb, 0xffffffff);
	r600_store_value(cb, 0xffffffff);
	r600_store_value(cb, 0xfffffffe);

	r600_store_context_reg_seq(cb, R_028900_SQ_ESGS_RING_ITEMSIZE, 6);
	r600_store_value(cb, 0); /* R_028900_SQ_ESGS_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028904_SQ_GSVS_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028908_SQ_ESTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_02890C_SQ_GSTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028910_SQ_VSTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028914_SQ_PSTMP_RING_ITEMSIZE */

	r600_store_context_reg_seq(cb, R_02891C_SQ_GS_VERT_ITEMSIZE, 4);
	r600_store_value(cb, 0); /* R_02891C_SQ_GS_VERT_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028920_SQ_GS_VERT_ITEMSIZE_1 */
	r600_store_value(cb, 0); /* R_028924_SQ_GS_VERT_ITEMSIZE_2 */
	r600_store_value(cb, 0); /* R_028928_SQ_GS_VERT_ITEMSIZE_3 */

	r600_store_context_reg_seq(cb, R_028A10_VGT_OUTPUT_PATH_CNTL, 13);
	r600_store_value(cb, 0);       /* R_028A10_VGT_OUTPUT_PATH_CNTL */
	r600_store_value(cb, 0);       /* R_028A14_VGT_HOS_CNTL */
	r600_store_value(cb, fui(64)); /* R_028A18_VGT_HOS_MAX_TESS_LEVEL */
	r600_store_value(cb, fui(0));  /* R_028A1C_VGT_HOS_MIN_TESS_LEVEL */
	r600_store_value(cb, 16);      /* R_028A20_VGT_HOS_REUSE_DEPTH */
	r600_store_value(cb, 0);       /* R_028A24_VGT_GROUP_PRIM_TYPE */
	r600_store_value(cb, 0);       /* R_028A28_VGT_GROUP_FIRST_DECR */
	r600_store_value(cb, 0);       /* R_028A2C_VGT_GROUP_DECR */
	r600_store_value(cb, 0);       /* R_028A30_VGT_GROUP_VECT_0_CNTL */
	r600_store_value(cb, 0);       /* R_028A34_VGT_GROUP_VECT_1_CNTL */
	r600_store_value(cb, 0);       /* R_028A38_VGT_GROUP_VECT_0_FMT_CNTL */
	r600_store_value(cb, 0);       /* R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL */
	r600_store_value(cb, 0);       /* R_028A40_VGT_GS_MODE */

	r600_store_context_reg(cb, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0);

	r600_store_config_reg(cb, R_008A14_PA_CL_ENHANCE, (3 << 1) | 1);

	r600_store_context_reg_seq(cb, CM_R_028BD4_PA_SC_CENTROID_PRIORITY_0, 2);
	r600_store_value(cb, 0x76543210); /* CM_R_028BD4_PA_SC_CENTROID_PRIORITY_0 */
	r600_store_value(cb, 0xfedcba98); /* CM_R_028BD8_PA_SC_CENTROID_PRIORITY_1 */

	r600_store_context_reg(cb, R_028724_GDS_ADDR_SIZE, 0x3fff);
	r600_store_context_reg_seq(cb, R_0288E8_SQ_LDS_ALLOC, 2);
	r600_store_value(cb, 0); /* R_0288E8_SQ_LDS_ALLOC */
	r600_store_value(cb, 0); /* R_0288EC_SQ_LDS_ALLOC_PS */

	r600_store_context_reg(cb, R_0288F0_SQ_VTX_SEMANTIC_CLEAR, ~0);

	r600_store_context_reg_seq(cb, R_028400_VGT_MAX_VTX_INDX, 2);
	r600_store_value(cb, ~0); /* R_028400_VGT_MAX_VTX_INDX */
	r600_store_value(cb, 0);  /* R_028404_VGT_MIN_VTX_INDX */

	r600_store_ctl_const(cb, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);

	r600_store_context_reg(cb, R_028028_DB_STENCIL_CLEAR, 0);

	r600_store_context_reg(cb, R_0286DC_SPI_FOG_CNTL, 0);

	r600_store_context_reg_seq(cb, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 3);
	r600_store_value(cb, 0); /* R_028AC0_DB_SRESULTS_COMPARE_STATE0 */
	r600_store_value(cb, 0); /* R_028AC4_DB_SRESULTS_COMPARE_STATE1 */
	r600_store_value(cb, 0); /* R_028AC8_DB_PRELOAD_CONTROL */

	r600_store_context_reg(cb, R_028200_PA_SC_WINDOW_OFFSET, 0);
	r600_store_context_reg(cb, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);

	r600_store_context_reg(cb, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
	r600_store_context_reg(cb, R_028820_PA_CL_NANINF_CNTL, 0);

	r600_store_context_reg_seq(cb, R_028240_PA_SC_GENERIC_SCISSOR_TL, 2);
	r600_store_value(cb, 0); /* R_028240_PA_SC_GENERIC_SCISSOR_TL */
	r600_store_value(cb, S_028244_BR_X(16384) | S_028244_BR_Y(16384)); /* R_028244_PA_SC_GENERIC_SCISSOR_BR */

	r600_store_context_reg_seq(cb, R_028030_PA_SC_SCREEN_SCISSOR_TL, 2);
	r600_store_value(cb, 0); /* R_028030_PA_SC_SCREEN_SCISSOR_TL */
	r600_store_value(cb, S_028034_BR_X(16384) | S_028034_BR_Y(16384)); /* R_028034_PA_SC_SCREEN_SCISSOR_BR */

	r600_store_context_reg(cb, R_028848_SQ_PGM_RESOURCES_2_PS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_028864_SQ_PGM_RESOURCES_2_VS, S_028864_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_02887C_SQ_PGM_RESOURCES_2_GS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_028894_SQ_PGM_RESOURCES_2_ES, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_0288C0_SQ_PGM_RESOURCES_2_HS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_0288D8_SQ_PGM_RESOURCES_2_LS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));

	r600_store_context_reg(cb, R_0288A8_SQ_PGM_RESOURCES_FS, 0);

	/* to avoid the GPU preloading constants from random addresses */
	r600_store_context_reg_seq(cb, R_028140_ALU_CONST_BUFFER_SIZE_PS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028180_ALU_CONST_BUFFER_SIZE_VS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028FC0_ALU_CONST_BUFFER_SIZE_LS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028F80_ALU_CONST_BUFFER_SIZE_HS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	if (rctx->screen->b.has_streamout) {
		r600_store_context_reg(cb, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
	}

	r600_store_context_reg(cb, R_028010_DB_RENDER_OVERRIDE2, 0);
	r600_store_context_reg(cb, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
	r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 0);
	r600_store_context_reg_seq(cb, R_0286E4_SPI_PS_IN_CONTROL_2, 2);
	r600_store_value(cb, 0); /* R_0286E4_SPI_PS_IN_CONTROL_2 */
	r600_store_value(cb, 0); /* R_0286E8_SPI_COMPUTE_INPUT_CNTL */

	r600_store_context_reg_seq(cb, R_028B54_VGT_SHADER_STAGES_EN, 2);
	r600_store_value(cb, 0); /* R_028B54_VGT_SHADER_STAGES_EN */
	r600_store_value(cb, 0); /* R_028B58_VGT_LS_HS_CONFIG */
	r600_store_context_reg(cb, R_028B6C_VGT_TF_PARAM, 0);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0, 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (32 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (64 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (96 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (128 * 4), 0x01000FFF);
}

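/* Note on the common register setup below: it programs the SQ thread
 * priorities for each shader stage and the default GPR split, and sets
 * VC_ENABLE only for parts that have a vertex cache (the low-end/APU
 * families in the switch leave it disabled).
 */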
void evergreen_init_common_regs(struct r600_context *rctx, struct r600_command_buffer *cb,
				enum chip_class ctx_chip_class,
				enum radeon_family ctx_family,
				int ctx_drm_minor)
{
	int ps_prio;
	int vs_prio;
	int gs_prio;
	int es_prio;
	int hs_prio;
	int cs_prio;
	int ls_prio;
	unsigned tmp;

	ps_prio = 0;
	vs_prio = 1;
	gs_prio = 2;
	es_prio = 3;
	hs_prio = 3;
	ls_prio = 3;
	cs_prio = 0;

	rctx->default_gprs[R600_HW_STAGE_PS] = 93;
	rctx->default_gprs[R600_HW_STAGE_VS] = 46;
	rctx->r6xx_num_clause_temp_gprs = 4;
	rctx->default_gprs[R600_HW_STAGE_GS] = 31;
	rctx->default_gprs[R600_HW_STAGE_ES] = 31;
	rctx->default_gprs[EG_HW_STAGE_HS] = 23;
	rctx->default_gprs[EG_HW_STAGE_LS] = 23;

	tmp = 0;
	switch (ctx_family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		break;
	default:
		tmp |= S_008C00_VC_ENABLE(1);
		break;
	}
	tmp |= S_008C00_EXPORT_SRC_C(1);
	tmp |= S_008C00_CS_PRIO(cs_prio);
	tmp |= S_008C00_LS_PRIO(ls_prio);
	tmp |= S_008C00_HS_PRIO(hs_prio);
	tmp |= S_008C00_PS_PRIO(ps_prio);
	tmp |= S_008C00_VS_PRIO(vs_prio);
	tmp |= S_008C00_GS_PRIO(gs_prio);
	tmp |= S_008C00_ES_PRIO(es_prio);

	r600_store_config_reg_seq(cb, R_008C00_SQ_CONFIG, 1);
	r600_store_value(cb, tmp); /* R_008C00_SQ_CONFIG */

	r600_store_config_reg_seq(cb, R_008C10_SQ_GLOBAL_GPR_RESOURCE_MGMT_1, 2);
	r600_store_value(cb, 0); /* R_008C10_SQ_GLOBAL_GPR_RESOURCE_MGMT_1 */
	r600_store_value(cb, 0); /* R_008C14_SQ_GLOBAL_GPR_RESOURCE_MGMT_2 */

	/* The cs checker requires this register to be set. */
	r600_store_context_reg(cb, R_028800_DB_DEPTH_CONTROL, 0);

	r600_store_context_reg_seq(cb, R_028350_SX_MISC, 2);
	r600_store_value(cb, 0);
	r600_store_value(cb, S_028354_SURFACE_SYNC_MASK(0xf));
}

void evergreen_init_atom_start_cs(struct r600_context *rctx)
{
	struct r600_command_buffer *cb = &rctx->start_cs_cmd;
	int num_ps_threads;
	int num_vs_threads;
	int num_gs_threads;
	int num_es_threads;
	int num_hs_threads;
	int num_ls_threads;
	int num_ps_stack_entries;
	int num_vs_stack_entries;
	int num_gs_stack_entries;
	int num_es_stack_entries;
	int num_hs_stack_entries;
	int num_ls_stack_entries;
	enum radeon_family family;
	unsigned tmp, i;

	if (rctx->b.chip_class == CAYMAN) {
		cayman_init_atom_start_cs(rctx);
		return;
	}

	r600_init_command_buffer(cb, 338);

	/* This must be first. */
	r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
	r600_store_value(cb, 0x80000000);
	r600_store_value(cb, 0x80000000);

	/* We're setting config registers here. */
	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));

	/* This enables pipeline stat & streamout queries.
	 * They are only disabled by blits.
	 */
	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START) | EVENT_INDEX(0));

	evergreen_init_common_regs(rctx, cb, rctx->b.chip_class,
				   rctx->b.family, rctx->screen->b.info.drm_minor);

	family = rctx->b.family;
	switch (family) {
	case CHIP_CEDAR:
	default:
		num_ps_threads = 96;
		num_vs_threads = 16;
		num_gs_threads = 16;
		num_es_threads = 16;
		num_hs_threads = 16;
		num_ls_threads = 16;
		num_ps_stack_entries = 42;
		num_vs_stack_entries = 42;
		num_gs_stack_entries = 42;
		num_es_stack_entries = 42;
		num_hs_stack_entries = 42;
		num_ls_stack_entries = 42;
		break;
	case CHIP_REDWOOD:
		num_ps_threads = 128;
		num_vs_threads = 20;
		num_gs_threads = 20;
		num_es_threads = 20;
		num_hs_threads = 20;
		num_ls_threads = 20;
		num_ps_stack_entries = 42;
		num_vs_stack_entries = 42;
		num_gs_stack_entries = 42;
		num_es_stack_entries = 42;
		num_hs_stack_entries = 42;
		num_ls_stack_entries = 42;
		break;
	case CHIP_JUNIPER:
		num_ps_threads = 128;
		num_vs_threads = 20;
		num_gs_threads = 20;
		num_es_threads = 20;
		num_hs_threads = 20;
		num_ls_threads = 20;
		num_ps_stack_entries = 85;
		num_vs_stack_entries = 85;
		num_gs_stack_entries = 85;
		num_es_stack_entries = 85;
		num_hs_stack_entries = 85;
		num_ls_stack_entries = 85;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		num_ps_threads = 128;
		num_vs_threads = 20;
		num_gs_threads = 20;
		num_es_threads = 20;
		num_hs_threads = 20;
		num_ls_threads = 20;
		num_ps_stack_entries = 85;
		num_vs_stack_entries = 85;
		num_gs_stack_entries = 85;
		num_es_stack_entries = 85;
		num_hs_stack_entries = 85;
		num_ls_stack_entries = 85;
		break;
	case CHIP_PALM:
		num_ps_threads = 96;
		num_vs_threads = 16;
		num_gs_threads = 16;
		num_es_threads = 16;
		num_hs_threads = 16;
		num_ls_threads = 16;
		num_ps_stack_entries = 42;
		num_vs_stack_entries = 42;
		num_gs_stack_entries = 42;
		num_es_stack_entries = 42;
		num_hs_stack_entries = 42;
		num_ls_stack_entries = 42;
		break;
	case CHIP_SUMO:
		num_ps_threads = 96;
		num_vs_threads = 25;
		num_gs_threads = 25;
		num_es_threads = 25;
		num_hs_threads = 16;
		num_ls_threads = 16;
		num_ps_stack_entries = 42;
		num_vs_stack_entries = 42;
		num_gs_stack_entries = 42;
		num_es_stack_entries = 42;
		num_hs_stack_entries = 42;
		num_ls_stack_entries = 42;
		break;
	case CHIP_SUMO2:
		num_ps_threads = 96;
		num_vs_threads = 25;
		num_gs_threads = 25;
		num_es_threads = 25;
		num_hs_threads = 16;
		num_ls_threads = 16;
		num_ps_stack_entries = 85;
		num_vs_stack_entries = 85;
		num_gs_stack_entries = 85;
		num_es_stack_entries = 85;
		num_hs_stack_entries = 85;
		num_ls_stack_entries = 85;
		break;
	case CHIP_BARTS:
		num_ps_threads = 128;
		num_vs_threads = 20;
		num_gs_threads = 20;
		num_es_threads = 20;
		num_hs_threads = 20;
		num_ls_threads = 20;
		num_ps_stack_entries = 85;
		num_vs_stack_entries = 85;
		num_gs_stack_entries = 85;
		num_es_stack_entries = 85;
		num_hs_stack_entries = 85;
		num_ls_stack_entries = 85;
		break;
	case CHIP_TURKS:
		num_ps_threads = 128;
		num_vs_threads = 20;
		num_gs_threads = 20;
		num_es_threads = 20;
		num_hs_threads = 20;
		num_ls_threads = 20;
		num_ps_stack_entries = 42;
		num_vs_stack_entries = 42;
		num_gs_stack_entries = 42;
		num_es_stack_entries = 42;
		num_hs_stack_entries = 42;
		num_ls_stack_entries = 42;
		break;
	case CHIP_CAICOS:
		num_ps_threads = 96;
		num_vs_threads = 10;
		num_gs_threads = 10;
		num_es_threads = 10;
		num_hs_threads = 10;
		num_ls_threads = 10;
		num_ps_stack_entries = 42;
		num_vs_stack_entries = 42;
		num_gs_stack_entries = 42;
		num_es_stack_entries = 42;
		num_hs_stack_entries = 42;
		num_ls_stack_entries = 42;
		break;
	}

	tmp = S_008C18_NUM_PS_THREADS(num_ps_threads);
	tmp |= S_008C18_NUM_VS_THREADS(num_vs_threads);
	tmp |= S_008C18_NUM_GS_THREADS(num_gs_threads);
	tmp |= S_008C18_NUM_ES_THREADS(num_es_threads);

	r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);
	r600_store_value(cb, tmp); /* R_008C18_SQ_THREAD_RESOURCE_MGMT_1 */

	tmp = S_008C1C_NUM_HS_THREADS(num_hs_threads);
	tmp |= S_008C1C_NUM_LS_THREADS(num_ls_threads);
	r600_store_value(cb, tmp); /* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2 */

	tmp = S_008C20_NUM_PS_STACK_ENTRIES(num_ps_stack_entries);
	tmp |= S_008C20_NUM_VS_STACK_ENTRIES(num_vs_stack_entries);
	r600_store_value(cb, tmp); /* R_008C20_SQ_STACK_RESOURCE_MGMT_1 */

	tmp = S_008C24_NUM_GS_STACK_ENTRIES(num_gs_stack_entries);
	tmp |= S_008C24_NUM_ES_STACK_ENTRIES(num_es_stack_entries);
	r600_store_value(cb, tmp); /* R_008C24_SQ_STACK_RESOURCE_MGMT_2 */

	tmp = S_008C28_NUM_HS_STACK_ENTRIES(num_hs_stack_entries);
	tmp |= S_008C28_NUM_LS_STACK_ENTRIES(num_ls_stack_entries);
	r600_store_value(cb, tmp); /* R_008C28_SQ_STACK_RESOURCE_MGMT_3 */

	r600_store_config_reg(cb, R_008E2C_SQ_LDS_RESOURCE_MGMT,
			      S_008E2C_NUM_PS_LDS(0x1000) | S_008E2C_NUM_LS_LDS(0x1000));

	/* remove LS/HS from one SIMD for hw workaround */
	r600_store_config_reg_seq(cb, R_008E20_SQ_STATIC_THREAD_MGMT1, 3);
	r600_store_value(cb, 0xffffffff);
	r600_store_value(cb, 0xffffffff);
	r600_store_value(cb, 0xfffffffe);

	r600_store_config_reg(cb, R_009100_SPI_CONFIG_CNTL, 0);
	r600_store_config_reg(cb, R_00913C_SPI_CONFIG_CNTL_1, S_00913C_VTX_DONE_DELAY(4));

	r600_store_context_reg_seq(cb, R_028900_SQ_ESGS_RING_ITEMSIZE, 6);
	r600_store_value(cb, 0); /* R_028900_SQ_ESGS_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028904_SQ_GSVS_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028908_SQ_ESTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_02890C_SQ_GSTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028910_SQ_VSTMP_RING_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028914_SQ_PSTMP_RING_ITEMSIZE */

	r600_store_context_reg_seq(cb, R_02891C_SQ_GS_VERT_ITEMSIZE, 4);
	r600_store_value(cb, 0); /* R_02891C_SQ_GS_VERT_ITEMSIZE */
	r600_store_value(cb, 0); /* R_028920_SQ_GS_VERT_ITEMSIZE_1 */
	r600_store_value(cb, 0); /* R_028924_SQ_GS_VERT_ITEMSIZE_2 */
	r600_store_value(cb, 0); /* R_028928_SQ_GS_VERT_ITEMSIZE_3 */

	r600_store_context_reg_seq(cb, R_028A10_VGT_OUTPUT_PATH_CNTL, 13);
	r600_store_value(cb, 0);        /* R_028A10_VGT_OUTPUT_PATH_CNTL */
	r600_store_value(cb, 0);        /* R_028A14_VGT_HOS_CNTL */
	r600_store_value(cb, fui(64));  /* R_028A18_VGT_HOS_MAX_TESS_LEVEL */
	r600_store_value(cb, fui(1.0)); /* R_028A1C_VGT_HOS_MIN_TESS_LEVEL */
	r600_store_value(cb, 16);       /* R_028A20_VGT_HOS_REUSE_DEPTH */
	r600_store_value(cb, 0);        /* R_028A24_VGT_GROUP_PRIM_TYPE */
	r600_store_value(cb, 0);        /* R_028A28_VGT_GROUP_FIRST_DECR */
	r600_store_value(cb, 0);        /* R_028A2C_VGT_GROUP_DECR */
	r600_store_value(cb, 0);        /* R_028A30_VGT_GROUP_VECT_0_CNTL */
	r600_store_value(cb, 0);        /* R_028A34_VGT_GROUP_VECT_1_CNTL */
	r600_store_value(cb, 0);        /* R_028A38_VGT_GROUP_VECT_0_FMT_CNTL */
	r600_store_value(cb, 0);        /* R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL */
	r600_store_value(cb, 0);        /* R_028A40_VGT_GS_MODE */

	r600_store_config_reg(cb, R_008A14_PA_CL_ENHANCE, (3 << 1) | 1);

	r600_store_context_reg(cb, R_0288F0_SQ_VTX_SEMANTIC_CLEAR, ~0);

	r600_store_context_reg_seq(cb, R_028400_VGT_MAX_VTX_INDX, 2);
	r600_store_value(cb, ~0); /* R_028400_VGT_MAX_VTX_INDX */
	r600_store_value(cb, 0);  /* R_028404_VGT_MIN_VTX_INDX */

	r600_store_ctl_const(cb, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0);

	r600_store_context_reg(cb, R_028028_DB_STENCIL_CLEAR, 0);

	r600_store_context_reg(cb, R_028200_PA_SC_WINDOW_OFFSET, 0);
	r600_store_context_reg(cb, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
	r600_store_context_reg(cb, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);

	r600_store_context_reg(cb, R_0286DC_SPI_FOG_CNTL, 0);
	r600_store_context_reg(cb, R_028820_PA_CL_NANINF_CNTL, 0);

	r600_store_context_reg_seq(cb, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 3);
	r600_store_value(cb, 0); /* R_028AC0_DB_SRESULTS_COMPARE_STATE0 */
	r600_store_value(cb, 0); /* R_028AC4_DB_SRESULTS_COMPARE_STATE1 */
	r600_store_value(cb, 0); /* R_028AC8_DB_PRELOAD_CONTROL */

	r600_store_context_reg_seq(cb, R_028240_PA_SC_GENERIC_SCISSOR_TL, 2);
	r600_store_value(cb, 0); /* R_028240_PA_SC_GENERIC_SCISSOR_TL */
	r600_store_value(cb, S_028244_BR_X(16384) | S_028244_BR_Y(16384)); /* R_028244_PA_SC_GENERIC_SCISSOR_BR */

	r600_store_context_reg_seq(cb, R_028030_PA_SC_SCREEN_SCISSOR_TL, 2);
	r600_store_value(cb, 0); /* R_028030_PA_SC_SCREEN_SCISSOR_TL */
	r600_store_value(cb, S_028034_BR_X(16384) | S_028034_BR_Y(16384)); /* R_028034_PA_SC_SCREEN_SCISSOR_BR */

	r600_store_context_reg(cb, R_028848_SQ_PGM_RESOURCES_2_PS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_028864_SQ_PGM_RESOURCES_2_VS, S_028864_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_02887C_SQ_PGM_RESOURCES_2_GS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_028894_SQ_PGM_RESOURCES_2_ES, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_0288A8_SQ_PGM_RESOURCES_FS, 0);
	r600_store_context_reg(cb, R_0288C0_SQ_PGM_RESOURCES_2_HS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));
	r600_store_context_reg(cb, R_0288D8_SQ_PGM_RESOURCES_2_LS, S_028848_SINGLE_ROUND(V_SQ_ROUND_NEAREST_EVEN));

	/* to avoid the GPU preloading constants from random addresses */
	r600_store_context_reg_seq(cb, R_028140_ALU_CONST_BUFFER_SIZE_PS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028180_ALU_CONST_BUFFER_SIZE_VS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028FC0_ALU_CONST_BUFFER_SIZE_LS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028F80_ALU_CONST_BUFFER_SIZE_HS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg(cb, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0);

	if (rctx->screen->b.has_streamout) {
		r600_store_context_reg(cb, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
	}

	r600_store_context_reg(cb, R_028010_DB_RENDER_OVERRIDE2, 0);
	r600_store_context_reg(cb, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
	r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 0);
	r600_store_context_reg_seq(cb, R_0286E4_SPI_PS_IN_CONTROL_2, 2);
	r600_store_value(cb, 0); /* R_0286E4_SPI_PS_IN_CONTROL_2 */
	r600_store_value(cb, 0); /* R_0286E8_SPI_COMPUTE_INPUT_CNTL */

	r600_store_context_reg_seq(cb, R_0288E8_SQ_LDS_ALLOC, 2);
	r600_store_value(cb, 0); /* R_0288E8_SQ_LDS_ALLOC */
	r600_store_value(cb, 0); /* R_0288EC_SQ_LDS_ALLOC_PS */

	if (rctx->b.family == CHIP_CAICOS) {
		r600_store_context_reg_seq(cb, R_028B54_VGT_SHADER_STAGES_EN, 2);
		r600_store_value(cb, 0); /* R_028B54_VGT_SHADER_STAGES_EN */
		r600_store_value(cb, 0); /* R_028B58_VGT_LS_HS_CONFIG */
		r600_store_context_reg(cb, R_028B6C_VGT_TF_PARAM, 0);
	} else {
		r600_store_context_reg_seq(cb, R_028B54_VGT_SHADER_STAGES_EN, 7);
		r600_store_value(cb, 0); /* R_028B54_VGT_SHADER_STAGES_EN */
		r600_store_value(cb, 0); /* R_028B58_VGT_LS_HS_CONFIG */
		r600_store_value(cb, 0); /* R_028B5C_VGT_LS_SIZE */
		r600_store_value(cb, 0); /* R_028B60_VGT_HS_SIZE */
		r600_store_value(cb, 0); /* R_028B64_VGT_LS_HS_ALLOC */
		r600_store_value(cb, 0); /* R_028B68_VGT_HS_PATCH_CONST */
		r600_store_value(cb, 0); /* R_028B6C_VGT_TF_PARAM */
	}

	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0, 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (32 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (64 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (96 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (128 * 4), 0x01000FFF);
}

		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028FC0_ALU_CONST_BUFFER_SIZE_LS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg_seq(cb, R_028F80_ALU_CONST_BUFFER_SIZE_HS_0, 16);
	for (i = 0; i < 16; i++)
		r600_store_value(cb, 0);

	r600_store_context_reg(cb, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0);

	if (rctx->screen->b.has_streamout) {
		r600_store_context_reg(cb, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
	}

	r600_store_context_reg(cb, R_028010_DB_RENDER_OVERRIDE2, 0);
	r600_store_context_reg(cb, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET, 0);
	r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 0);
	r600_store_context_reg_seq(cb, R_0286E4_SPI_PS_IN_CONTROL_2, 2);
	r600_store_value(cb, 0); /* R_0286E4_SPI_PS_IN_CONTROL_2 */
	r600_store_value(cb, 0); /* R_0286E8_SPI_COMPUTE_INPUT_CNTL */

	r600_store_context_reg_seq(cb, R_0288E8_SQ_LDS_ALLOC, 2);
	r600_store_value(cb, 0); /* R_0288E8_SQ_LDS_ALLOC */
	r600_store_value(cb, 0); /* R_0288EC_SQ_LDS_ALLOC_PS */

	if (rctx->b.family == CHIP_CAICOS) {
		r600_store_context_reg_seq(cb, R_028B54_VGT_SHADER_STAGES_EN, 2);
		r600_store_value(cb, 0); /* R028B54_VGT_SHADER_STAGES_EN */
		r600_store_value(cb, 0); /* R028B58_VGT_LS_HS_CONFIG */
		r600_store_context_reg(cb, R_028B6C_VGT_TF_PARAM, 0);
	} else {
		r600_store_context_reg_seq(cb, R_028B54_VGT_SHADER_STAGES_EN, 7);
		r600_store_value(cb, 0); /* R028B54_VGT_SHADER_STAGES_EN */
		r600_store_value(cb, 0); /* R028B58_VGT_LS_HS_CONFIG */
		r600_store_value(cb, 0); /* R028B5C_VGT_LS_SIZE */
		r600_store_value(cb, 0); /* R028B60_VGT_HS_SIZE */
		r600_store_value(cb, 0); /* R028B64_VGT_LS_HS_ALLOC */
		r600_store_value(cb, 0); /* R028B68_VGT_HS_PATCH_CONST */
		r600_store_value(cb, 0); /* R028B6C_VGT_TF_PARAM */
	}

	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0, 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (32 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (64 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (96 * 4), 0x01000FFF);
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (128 * 4), 0x01000FFF);
}

void evergreen_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_command_buffer *cb = &shader->command_buffer;
	struct r600_shader *rshader = &shader->shader;
	unsigned i, exports_ps, num_cout, spi_ps_in_control_0, spi_input_z, spi_ps_in_control_1, db_shader_control = 0;
	int pos_index = -1, face_index = -1, fixed_pt_position_index = -1;
	int ninterp = 0;
	boolean have_perspective = FALSE, have_linear = FALSE;
	static const unsigned spi_baryc_enable_bit[6] = {
		S_0286E0_PERSP_SAMPLE_ENA(1),
		S_0286E0_PERSP_CENTER_ENA(1),
		S_0286E0_PERSP_CENTROID_ENA(1),
		S_0286E0_LINEAR_SAMPLE_ENA(1),
		S_0286E0_LINEAR_CENTER_ENA(1),
		S_0286E0_LINEAR_CENTROID_ENA(1)
	};
	unsigned spi_baryc_cntl = 0, sid, tmp, num = 0;
	unsigned z_export = 0, stencil_export = 0, mask_export = 0;
	unsigned sprite_coord_enable = rctx->rasterizer ?
rctx->rasterizer->sprite_coord_enable : 0; 3258 uint32_t spi_ps_input_cntl[32]; 3259 3260 if (!cb->buf) { 3261 r600_init_command_buffer(cb, 64); 3262 } else { 3263 cb->num_dw = 0; 3264 } 3265 3266 for (i = 0; i < rshader->ninput; i++) { 3267 /* evergreen NUM_INTERP only contains values interpolated into the LDS, 3268 POSITION goes via GPRs from the SC so isn't counted */ 3269 if (rshader->input[i].name == TGSI_SEMANTIC_POSITION) 3270 pos_index = i; 3271 else if (rshader->input[i].name == TGSI_SEMANTIC_FACE) { 3272 if (face_index == -1) 3273 face_index = i; 3274 } 3275 else if (rshader->input[i].name == TGSI_SEMANTIC_SAMPLEMASK) { 3276 if (face_index == -1) 3277 face_index = i; /* lives in same register, same enable bit */ 3278 } 3279 else if (rshader->input[i].name == TGSI_SEMANTIC_SAMPLEID) { 3280 fixed_pt_position_index = i; 3281 } 3282 else { 3283 ninterp++; 3284 int k = eg_get_interpolator_index( 3285 rshader->input[i].interpolate, 3286 rshader->input[i].interpolate_location); 3287 if (k >= 0) { 3288 spi_baryc_cntl |= spi_baryc_enable_bit[k]; 3289 have_perspective |= k < 3; 3290 have_linear |= !(k < 3); 3291 } 3292 } 3293 3294 sid = rshader->input[i].spi_sid; 3295 3296 if (sid) { 3297 tmp = S_028644_SEMANTIC(sid); 3298 3299 /* D3D 9 behaviour. GL is undefined */ 3300 if (rshader->input[i].name == TGSI_SEMANTIC_COLOR && rshader->input[i].sid == 0) 3301 tmp |= S_028644_DEFAULT_VAL(3); 3302 3303 if (rshader->input[i].name == TGSI_SEMANTIC_POSITION || 3304 rshader->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT || 3305 (rshader->input[i].interpolate == TGSI_INTERPOLATE_COLOR && 3306 rctx->rasterizer && rctx->rasterizer->flatshade)) { 3307 tmp |= S_028644_FLAT_SHADE(1); 3308 } 3309 3310 if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC && 3311 (sprite_coord_enable & (1 << rshader->input[i].sid))) { 3312 tmp |= S_028644_PT_SPRITE_TEX(1); 3313 } 3314 3315 spi_ps_input_cntl[num++] = tmp; 3316 } 3317 } 3318 3319 r600_store_context_reg_seq(cb, R_028644_SPI_PS_INPUT_CNTL_0, num); 3320 r600_store_array(cb, num, spi_ps_input_cntl); 3321 3322 for (i = 0; i < rshader->noutput; i++) { 3323 if (rshader->output[i].name == TGSI_SEMANTIC_POSITION) 3324 z_export = 1; 3325 if (rshader->output[i].name == TGSI_SEMANTIC_STENCIL) 3326 stencil_export = 1; 3327 if (rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK && 3328 rctx->framebuffer.nr_samples > 1 && rctx->ps_iter_samples > 0) 3329 mask_export = 1; 3330 } 3331 if (rshader->uses_kill) 3332 db_shader_control |= S_02880C_KILL_ENABLE(1); 3333 3334 db_shader_control |= S_02880C_Z_EXPORT_ENABLE(z_export); 3335 db_shader_control |= S_02880C_STENCIL_EXPORT_ENABLE(stencil_export); 3336 db_shader_control |= S_02880C_MASK_EXPORT_ENABLE(mask_export); 3337 3338 if (shader->selector->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL]) { 3339 db_shader_control |= S_02880C_DEPTH_BEFORE_SHADER(1) | 3340 S_02880C_EXEC_ON_NOOP(shader->selector->info.writes_memory); 3341 } else if (shader->selector->info.writes_memory) { 3342 db_shader_control |= S_02880C_EXEC_ON_HIER_FAIL(1); 3343 } 3344 3345 switch (rshader->ps_conservative_z) { 3346 default: /* fall through */ 3347 case TGSI_FS_DEPTH_LAYOUT_ANY: 3348 db_shader_control |= S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_ANY_Z); 3349 break; 3350 case TGSI_FS_DEPTH_LAYOUT_GREATER: 3351 db_shader_control |= S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_GREATER_THAN_Z); 3352 break; 3353 case TGSI_FS_DEPTH_LAYOUT_LESS: 3354 db_shader_control |= S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_LESS_THAN_Z); 3355 break; 
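	/* Added note (not from the original source): conservative Z export is a
	 * promise to the DB about which way the shader may move a fragment's
	 * depth. GREATER/LESS let coarse hierarchical-Z culling stay useful
	 * even though the shader writes depth; EXPORT_ANY_Z makes no promise
	 * and is the safe default. */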
3356 } 3357 3358 exports_ps = 0; 3359 for (i = 0; i < rshader->noutput; i++) { 3360 if (rshader->output[i].name == TGSI_SEMANTIC_POSITION || 3361 rshader->output[i].name == TGSI_SEMANTIC_STENCIL || 3362 rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK) 3363 exports_ps |= 1; 3364 } 3365 3366 num_cout = rshader->nr_ps_color_exports; 3367 3368 exports_ps |= S_02884C_EXPORT_COLORS(num_cout); 3369 if (!exports_ps) { 3370 /* always at least export 1 component per pixel */ 3371 exports_ps = 2; 3372 } 3373 shader->nr_ps_color_outputs = num_cout; 3374 if (ninterp == 0) { 3375 ninterp = 1; 3376 have_perspective = TRUE; 3377 } 3378 if (!spi_baryc_cntl) 3379 spi_baryc_cntl |= spi_baryc_enable_bit[0]; 3380 3381 if (!have_perspective && !have_linear) 3382 have_perspective = TRUE; 3383 3384 spi_ps_in_control_0 = S_0286CC_NUM_INTERP(ninterp) | 3385 S_0286CC_PERSP_GRADIENT_ENA(have_perspective) | 3386 S_0286CC_LINEAR_GRADIENT_ENA(have_linear); 3387 spi_input_z = 0; 3388 if (pos_index != -1) { 3389 spi_ps_in_control_0 |= S_0286CC_POSITION_ENA(1) | 3390 S_0286CC_POSITION_CENTROID(rshader->input[pos_index].interpolate_location == TGSI_INTERPOLATE_LOC_CENTROID) | 3391 S_0286CC_POSITION_ADDR(rshader->input[pos_index].gpr); 3392 spi_input_z |= S_0286D8_PROVIDE_Z_TO_SPI(1); 3393 } 3394 3395 spi_ps_in_control_1 = 0; 3396 if (face_index != -1) { 3397 spi_ps_in_control_1 |= S_0286D0_FRONT_FACE_ENA(1) | 3398 S_0286D0_FRONT_FACE_ADDR(rshader->input[face_index].gpr); 3399 } 3400 if (fixed_pt_position_index != -1) { 3401 spi_ps_in_control_1 |= S_0286D0_FIXED_PT_POSITION_ENA(1) | 3402 S_0286D0_FIXED_PT_POSITION_ADDR(rshader->input[fixed_pt_position_index].gpr); 3403 } 3404 3405 r600_store_context_reg_seq(cb, R_0286CC_SPI_PS_IN_CONTROL_0, 2); 3406 r600_store_value(cb, spi_ps_in_control_0); /* R_0286CC_SPI_PS_IN_CONTROL_0 */ 3407 r600_store_value(cb, spi_ps_in_control_1); /* R_0286D0_SPI_PS_IN_CONTROL_1 */ 3408 3409 r600_store_context_reg(cb, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl); 3410 r600_store_context_reg(cb, R_0286D8_SPI_INPUT_Z, spi_input_z); 3411 r600_store_context_reg(cb, R_02884C_SQ_PGM_EXPORTS_PS, exports_ps); 3412 3413 r600_store_context_reg_seq(cb, R_028840_SQ_PGM_START_PS, 2); 3414 r600_store_value(cb, shader->bo->gpu_address >> 8); 3415 r600_store_value(cb, /* R_028844_SQ_PGM_RESOURCES_PS */ 3416 S_028844_NUM_GPRS(rshader->bc.ngpr) | 3417 S_028844_PRIME_CACHE_ON_DRAW(1) | 3418 S_028844_DX10_CLAMP(1) | 3419 S_028844_STACK_SIZE(rshader->bc.nstack)); 3420 /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */ 3421 3422 shader->db_shader_control = db_shader_control; 3423 shader->ps_depth_export = z_export | stencil_export | mask_export; 3424 3425 shader->sprite_coord_enable = sprite_coord_enable; 3426 if (rctx->rasterizer) 3427 shader->flatshade = rctx->rasterizer->flatshade; 3428 } 3429 3430 void evergreen_update_es_state(struct pipe_context *ctx, struct r600_pipe_shader *shader) 3431 { 3432 struct r600_command_buffer *cb = &shader->command_buffer; 3433 struct r600_shader *rshader = &shader->shader; 3434 3435 r600_init_command_buffer(cb, 32); 3436 3437 r600_store_context_reg(cb, R_028890_SQ_PGM_RESOURCES_ES, 3438 S_028890_NUM_GPRS(rshader->bc.ngpr) | 3439 S_028890_DX10_CLAMP(1) | 3440 S_028890_STACK_SIZE(rshader->bc.nstack)); 3441 r600_store_context_reg(cb, R_02888C_SQ_PGM_START_ES, 3442 shader->bo->gpu_address >> 8); 3443 /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). 
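	 * A sketch of that packet pair, assuming the buffer-list helper used
	 * elsewhere in this file (the priority constant and exact emit site
	 * are assumptions; the shared r600 shader-emit path owns this):
	 *   radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	 *   radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
	 *                                             shader->bo, RADEON_USAGE_READ,
	 *                                             RADEON_PRIO_SHADER_BINARY));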
*/ 3444 } 3445 3446 void evergreen_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader) 3447 { 3448 struct r600_context *rctx = (struct r600_context *)ctx; 3449 struct r600_command_buffer *cb = &shader->command_buffer; 3450 struct r600_shader *rshader = &shader->shader; 3451 struct r600_shader *cp_shader = &shader->gs_copy_shader->shader; 3452 unsigned gsvs_itemsizes[4] = { 3453 (cp_shader->ring_item_sizes[0] * shader->selector->gs_max_out_vertices) >> 2, 3454 (cp_shader->ring_item_sizes[1] * shader->selector->gs_max_out_vertices) >> 2, 3455 (cp_shader->ring_item_sizes[2] * shader->selector->gs_max_out_vertices) >> 2, 3456 (cp_shader->ring_item_sizes[3] * shader->selector->gs_max_out_vertices) >> 2 3457 }; 3458 3459 r600_init_command_buffer(cb, 64); 3460 3461 /* VGT_GS_MODE is written by evergreen_emit_shader_stages */ 3462 3463 3464 r600_store_context_reg(cb, R_028B38_VGT_GS_MAX_VERT_OUT, 3465 S_028B38_MAX_VERT_OUT(shader->selector->gs_max_out_vertices)); 3466 r600_store_context_reg(cb, R_028A6C_VGT_GS_OUT_PRIM_TYPE, 3467 r600_conv_prim_to_gs_out(shader->selector->gs_output_prim)); 3468 3469 if (rctx->screen->b.info.drm_minor >= 35) { 3470 r600_store_context_reg(cb, R_028B90_VGT_GS_INSTANCE_CNT, 3471 S_028B90_CNT(MIN2(shader->selector->gs_num_invocations, 127)) | 3472 S_028B90_ENABLE(shader->selector->gs_num_invocations > 0)); 3473 } 3474 r600_store_context_reg_seq(cb, R_02891C_SQ_GS_VERT_ITEMSIZE, 4); 3475 r600_store_value(cb, cp_shader->ring_item_sizes[0] >> 2); 3476 r600_store_value(cb, cp_shader->ring_item_sizes[1] >> 2); 3477 r600_store_value(cb, cp_shader->ring_item_sizes[2] >> 2); 3478 r600_store_value(cb, cp_shader->ring_item_sizes[3] >> 2); 3479 3480 r600_store_context_reg(cb, R_028900_SQ_ESGS_RING_ITEMSIZE, 3481 (rshader->ring_item_sizes[0]) >> 2); 3482 3483 r600_store_context_reg(cb, R_028904_SQ_GSVS_RING_ITEMSIZE, 3484 gsvs_itemsizes[0] + 3485 gsvs_itemsizes[1] + 3486 gsvs_itemsizes[2] + 3487 gsvs_itemsizes[3]); 3488 3489 r600_store_context_reg_seq(cb, R_02892C_SQ_GSVS_RING_OFFSET_1, 3); 3490 r600_store_value(cb, gsvs_itemsizes[0]); 3491 r600_store_value(cb, gsvs_itemsizes[0] + gsvs_itemsizes[1]); 3492 r600_store_value(cb, gsvs_itemsizes[0] + gsvs_itemsizes[1] + gsvs_itemsizes[2]); 3493 3494 /* FIXME calculate these values somehow ??? */ 3495 r600_store_context_reg_seq(cb, R_028A54_GS_PER_ES, 3); 3496 r600_store_value(cb, 0x80); /* GS_PER_ES */ 3497 r600_store_value(cb, 0x100); /* ES_PER_GS */ 3498 r600_store_value(cb, 0x2); /* GS_PER_VS */ 3499 3500 r600_store_context_reg(cb, R_028878_SQ_PGM_RESOURCES_GS, 3501 S_028878_NUM_GPRS(rshader->bc.ngpr) | 3502 S_028878_DX10_CLAMP(1) | 3503 S_028878_STACK_SIZE(rshader->bc.nstack)); 3504 r600_store_context_reg(cb, R_028874_SQ_PGM_START_GS, 3505 shader->bo->gpu_address >> 8); 3506 /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). 
*/ 3507 } 3508 3509 3510 void evergreen_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader) 3511 { 3512 struct r600_command_buffer *cb = &shader->command_buffer; 3513 struct r600_shader *rshader = &shader->shader; 3514 unsigned spi_vs_out_id[10] = {}; 3515 unsigned i, tmp, nparams = 0; 3516 3517 for (i = 0; i < rshader->noutput; i++) { 3518 if (rshader->output[i].spi_sid) { 3519 tmp = rshader->output[i].spi_sid << ((nparams & 3) * 8); 3520 spi_vs_out_id[nparams / 4] |= tmp; 3521 nparams++; 3522 } 3523 } 3524 3525 r600_init_command_buffer(cb, 32); 3526 3527 r600_store_context_reg_seq(cb, R_02861C_SPI_VS_OUT_ID_0, 10); 3528 for (i = 0; i < 10; i++) { 3529 r600_store_value(cb, spi_vs_out_id[i]); 3530 } 3531 3532 /* Certain attributes (position, psize, etc.) don't count as params. 3533 * VS is required to export at least one param and r600_shader_from_tgsi() 3534 * takes care of adding a dummy export. 3535 */ 3536 if (nparams < 1) 3537 nparams = 1; 3538 3539 r600_store_context_reg(cb, R_0286C4_SPI_VS_OUT_CONFIG, 3540 S_0286C4_VS_EXPORT_COUNT(nparams - 1)); 3541 r600_store_context_reg(cb, R_028860_SQ_PGM_RESOURCES_VS, 3542 S_028860_NUM_GPRS(rshader->bc.ngpr) | 3543 S_028860_DX10_CLAMP(1) | 3544 S_028860_STACK_SIZE(rshader->bc.nstack)); 3545 if (rshader->vs_position_window_space) { 3546 r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL, 3547 S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1)); 3548 } else { 3549 r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL, 3550 S_028818_VTX_W0_FMT(1) | 3551 S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) | 3552 S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) | 3553 S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1)); 3554 3555 } 3556 r600_store_context_reg(cb, R_02885C_SQ_PGM_START_VS, 3557 shader->bo->gpu_address >> 8); 3558 /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). 
*/ 3559 3560 shader->pa_cl_vs_out_cntl = 3561 S_02881C_VS_OUT_CCDIST0_VEC_ENA((rshader->cc_dist_mask & 0x0F) != 0) | 3562 S_02881C_VS_OUT_CCDIST1_VEC_ENA((rshader->cc_dist_mask & 0xF0) != 0) | 3563 S_02881C_VS_OUT_MISC_VEC_ENA(rshader->vs_out_misc_write) | 3564 S_02881C_USE_VTX_POINT_SIZE(rshader->vs_out_point_size) | 3565 S_02881C_USE_VTX_EDGE_FLAG(rshader->vs_out_edgeflag) | 3566 S_02881C_USE_VTX_VIEWPORT_INDX(rshader->vs_out_viewport) | 3567 S_02881C_USE_VTX_RENDER_TARGET_INDX(rshader->vs_out_layer); 3568 } 3569 3570 void evergreen_update_hs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader) 3571 { 3572 struct r600_command_buffer *cb = &shader->command_buffer; 3573 struct r600_shader *rshader = &shader->shader; 3574 3575 r600_init_command_buffer(cb, 32); 3576 r600_store_context_reg(cb, R_0288BC_SQ_PGM_RESOURCES_HS, 3577 S_0288BC_NUM_GPRS(rshader->bc.ngpr) | 3578 S_0288BC_DX10_CLAMP(1) | 3579 S_0288BC_STACK_SIZE(rshader->bc.nstack)); 3580 r600_store_context_reg(cb, R_0288B8_SQ_PGM_START_HS, 3581 shader->bo->gpu_address >> 8); 3582 } 3583 3584 void evergreen_update_ls_state(struct pipe_context *ctx, struct r600_pipe_shader *shader) 3585 { 3586 struct r600_command_buffer *cb = &shader->command_buffer; 3587 struct r600_shader *rshader = &shader->shader; 3588 3589 r600_init_command_buffer(cb, 32); 3590 r600_store_context_reg(cb, R_0288D4_SQ_PGM_RESOURCES_LS, 3591 S_0288D4_NUM_GPRS(rshader->bc.ngpr) | 3592 S_0288D4_DX10_CLAMP(1) | 3593 S_0288D4_STACK_SIZE(rshader->bc.nstack)); 3594 r600_store_context_reg(cb, R_0288D0_SQ_PGM_START_LS, 3595 shader->bo->gpu_address >> 8); 3596 } 3597 void *evergreen_create_resolve_blend(struct r600_context *rctx) 3598 { 3599 struct pipe_blend_state blend; 3600 3601 memset(&blend, 0, sizeof(blend)); 3602 blend.independent_blend_enable = true; 3603 blend.rt[0].colormask = 0xf; 3604 return evergreen_create_blend_state_mode(&rctx->b.b, &blend, V_028808_CB_RESOLVE); 3605 } 3606 3607 void *evergreen_create_decompress_blend(struct r600_context *rctx) 3608 { 3609 struct pipe_blend_state blend; 3610 unsigned mode = rctx->screen->has_compressed_msaa_texturing ? 3611 V_028808_CB_FMASK_DECOMPRESS : V_028808_CB_DECOMPRESS; 3612 3613 memset(&blend, 0, sizeof(blend)); 3614 blend.independent_blend_enable = true; 3615 blend.rt[0].colormask = 0xf; 3616 return evergreen_create_blend_state_mode(&rctx->b.b, &blend, mode); 3617 } 3618 3619 void *evergreen_create_fastclear_blend(struct r600_context *rctx) 3620 { 3621 struct pipe_blend_state blend; 3622 unsigned mode = V_028808_CB_ELIMINATE_FAST_CLEAR; 3623 3624 memset(&blend, 0, sizeof(blend)); 3625 blend.independent_blend_enable = true; 3626 blend.rt[0].colormask = 0xf; 3627 return evergreen_create_blend_state_mode(&rctx->b.b, &blend, mode); 3628 } 3629 3630 void *evergreen_create_db_flush_dsa(struct r600_context *rctx) 3631 { 3632 struct pipe_depth_stencil_alpha_state dsa = {{0}}; 3633 3634 return rctx->b.b.create_depth_stencil_alpha_state(&rctx->b.b, &dsa); 3635 } 3636 3637 void evergreen_update_db_shader_control(struct r600_context * rctx) 3638 { 3639 bool dual_export; 3640 unsigned db_shader_control; 3641 3642 if (!rctx->ps_shader) { 3643 return; 3644 } 3645 3646 dual_export = rctx->framebuffer.export_16bpc && 3647 !rctx->ps_shader->current->ps_depth_export; 3648 3649 db_shader_control = rctx->ps_shader->current->db_shader_control | 3650 S_02880C_DUAL_EXPORT_ENABLE(dual_export) | 3651 S_02880C_DB_SOURCE_FORMAT(dual_export ? 
V_02880C_EXPORT_DB_TWO : 3652 V_02880C_EXPORT_DB_FULL) | 3653 S_02880C_ALPHA_TO_MASK_DISABLE(rctx->framebuffer.cb0_is_integer); 3654 3655 /* When alpha test is enabled we can't trust the hw to make the proper 3656 * decision on the order in which ztest should be run related to fragment 3657 * shader execution. 3658 * 3659 * If alpha test is enabled perform early z rejection (RE_Z) but don't early 3660 * write to the zbuffer. Write to zbuffer is delayed after fragment shader 3661 * execution and thus after alpha test so if discarded by the alpha test 3662 * the z value is not written. 3663 * If ReZ is enabled, and the zfunc/zenable/zwrite values change you can 3664 * get a hang unless you flush the DB in between. For now just use 3665 * LATE_Z. 3666 */ 3667 if (rctx->alphatest_state.sx_alpha_test_control || rctx->ps_shader->info.writes_memory) { 3668 db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z); 3669 } else { 3670 db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z); 3671 } 3672 3673 if (db_shader_control != rctx->db_misc_state.db_shader_control) { 3674 rctx->db_misc_state.db_shader_control = db_shader_control; 3675 r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom); 3676 } 3677 } 3678 3679 static void evergreen_dma_copy_tile(struct r600_context *rctx, 3680 struct pipe_resource *dst, 3681 unsigned dst_level, 3682 unsigned dst_x, 3683 unsigned dst_y, 3684 unsigned dst_z, 3685 struct pipe_resource *src, 3686 unsigned src_level, 3687 unsigned src_x, 3688 unsigned src_y, 3689 unsigned src_z, 3690 unsigned copy_height, 3691 unsigned pitch, 3692 unsigned bpp) 3693 { 3694 struct radeon_winsys_cs *cs = rctx->b.dma.cs; 3695 struct r600_texture *rsrc = (struct r600_texture*)src; 3696 struct r600_texture *rdst = (struct r600_texture*)dst; 3697 unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size; 3698 unsigned ncopy, height, cheight, detile, i, x, y, z, src_mode, dst_mode; 3699 unsigned sub_cmd, bank_h, bank_w, mt_aspect, nbanks, tile_split, non_disp_tiling = 0; 3700 uint64_t base, addr; 3701 3702 dst_mode = rdst->surface.u.legacy.level[dst_level].mode; 3703 src_mode = rsrc->surface.u.legacy.level[src_level].mode; 3704 assert(dst_mode != src_mode); 3705 3706 /* non_disp_tiling bit needs to be set for depth, stencil, and fmask surfaces */ 3707 if (util_format_has_depth(util_format_description(src->format))) 3708 non_disp_tiling = 1; 3709 3710 y = 0; 3711 sub_cmd = EG_DMA_COPY_TILED; 3712 lbpp = util_logbase2(bpp); 3713 pitch_tile_max = ((pitch / bpp) / 8) - 1; 3714 nbanks = eg_num_banks(rctx->screen->b.info.r600_num_banks); 3715 3716 if (dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED) { 3717 /* T2L */ 3718 array_mode = evergreen_array_mode(src_mode); 3719 slice_tile_max = (rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.u.legacy.level[src_level].nblk_y) / (8*8); 3720 slice_tile_max = slice_tile_max ? 
slice_tile_max - 1 : 0;
		/* The linear height must be the same as the slice tile max height.
		 * It is OK even if the linear destination/source has a smaller
		 * height, because the size of the DMA packet is based on
		 * copy_height, which is always smaller than or equal to the
		 * linear height.
		 */
		height = u_minify(rsrc->resource.b.b.height0, src_level);
		detile = 1;
		x = src_x;
		y = src_y;
		z = src_z;
		base = rsrc->surface.u.legacy.level[src_level].offset;
		addr = rdst->surface.u.legacy.level[dst_level].offset;
		addr += (uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4 * dst_z;
		addr += dst_y * pitch + dst_x * bpp;
		bank_h = eg_bank_wh(rsrc->surface.u.legacy.bankh);
		bank_w = eg_bank_wh(rsrc->surface.u.legacy.bankw);
		mt_aspect = eg_macro_tile_aspect(rsrc->surface.u.legacy.mtilea);
		tile_split = eg_tile_split(rsrc->surface.u.legacy.tile_split);
		base += rsrc->resource.gpu_address;
		addr += rdst->resource.gpu_address;
	} else {
		/* L2T */
		array_mode = evergreen_array_mode(dst_mode);
		slice_tile_max = (rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.u.legacy.level[dst_level].nblk_y) / (8*8);
		slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0;
		/* The linear height must be the same as the slice tile max height.
		 * It is OK even if the linear destination/source has a smaller
		 * height, because the size of the DMA packet is based on
		 * copy_height, which is always smaller than or equal to the
		 * linear height.
		 */
		height = u_minify(rdst->resource.b.b.height0, dst_level);
		detile = 0;
		x = dst_x;
		y = dst_y;
		z = dst_z;
		base = rdst->surface.u.legacy.level[dst_level].offset;
		addr = rsrc->surface.u.legacy.level[src_level].offset;
		addr += (uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4 * src_z;
		addr += src_y * pitch + src_x * bpp;
		bank_h = eg_bank_wh(rdst->surface.u.legacy.bankh);
		bank_w = eg_bank_wh(rdst->surface.u.legacy.bankw);
		mt_aspect = eg_macro_tile_aspect(rdst->surface.u.legacy.mtilea);
		tile_split = eg_tile_split(rdst->surface.u.legacy.tile_split);
		base += rdst->resource.gpu_address;
		addr += rsrc->resource.gpu_address;
	}

	size = (copy_height * pitch) / 4;
	ncopy = (size / EG_DMA_COPY_MAX_SIZE) + !!(size % EG_DMA_COPY_MAX_SIZE);
	r600_need_dma_space(&rctx->b, ncopy * 9, &rdst->resource, &rsrc->resource);

	for (i = 0; i < ncopy; i++) {
		cheight = copy_height;
		if (((cheight * pitch) / 4) > EG_DMA_COPY_MAX_SIZE) {
			cheight = (EG_DMA_COPY_MAX_SIZE * 4) / pitch;
		}
		size = (cheight * pitch) / 4;
		/* emit reloc before writing cs so that cs is always in consistent state */
		radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rsrc->resource,
					  RADEON_USAGE_READ, RADEON_PRIO_SDMA_TEXTURE);
		radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rdst->resource,
					  RADEON_USAGE_WRITE, RADEON_PRIO_SDMA_TEXTURE);
		radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, sub_cmd, size));
		radeon_emit(cs, base >> 8);
		radeon_emit(cs, (detile << 31) | (array_mode << 27) |
				(lbpp << 24) | (bank_h << 21) |
				(bank_w << 18) | (mt_aspect << 16));
		radeon_emit(cs, (pitch_tile_max << 0) | ((height - 1) << 16));
		radeon_emit(cs, (slice_tile_max << 0));
		radeon_emit(cs, (x << 0) | (z << 18));
		radeon_emit(cs, (y << 0) | (tile_split << 21) | (nbanks << 25) | (non_disp_tiling << 28));
		radeon_emit(cs, addr &
0xfffffffc);
		radeon_emit(cs, (addr >> 32UL) & 0xff);
		copy_height -= cheight;
		addr += cheight * pitch;
		y += cheight;
	}
}

static void evergreen_dma_copy(struct pipe_context *ctx,
			       struct pipe_resource *dst,
			       unsigned dst_level,
			       unsigned dstx, unsigned dsty, unsigned dstz,
			       struct pipe_resource *src,
			       unsigned src_level,
			       const struct pipe_box *src_box)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_texture *rsrc = (struct r600_texture*)src;
	struct r600_texture *rdst = (struct r600_texture*)dst;
	unsigned dst_pitch, src_pitch, bpp, dst_mode, src_mode, copy_height;
	unsigned src_w, dst_w;
	unsigned src_x, src_y;
	unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;

	if (rctx->b.dma.cs == NULL) {
		goto fallback;
	}

	if (rctx->cmd_buf_is_compute) {
		rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
		rctx->cmd_buf_is_compute = false;
	}

	if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
		evergreen_dma_copy_buffer(rctx, dst, src, dst_x, src_box->x, src_box->width);
		return;
	}

	if (src_box->depth > 1 ||
	    !r600_prepare_for_dma_blit(&rctx->b, rdst, dst_level, dstx, dsty,
					dstz, rsrc, src_level, src_box))
		goto fallback;

	src_x = util_format_get_nblocksx(src->format, src_box->x);
	dst_x = util_format_get_nblocksx(src->format, dst_x);
	src_y = util_format_get_nblocksy(src->format, src_box->y);
	dst_y = util_format_get_nblocksy(src->format, dst_y);

	bpp = rdst->surface.bpe;
	dst_pitch = rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.bpe;
	src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.bpe;
	src_w = u_minify(rsrc->resource.b.b.width0, src_level);
	dst_w = u_minify(rdst->resource.b.b.width0, dst_level);
	copy_height = src_box->height / rsrc->surface.blk_h;

	dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
	src_mode = rsrc->surface.u.legacy.level[src_level].mode;

	if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w) {
		/* FIXME evergreen can do partial blit */
		goto fallback;
	}
	/* The x tests here are currently useless (because we don't support
	 * partial blits), but keep them around so we don't forget about them.
	 */
	if (src_pitch % 8 || src_box->x % 8 || dst_x % 8 || src_box->y % 8 || dst_y % 8) {
		goto fallback;
	}

	/* 128 bpp surfaces require non_disp_tiling for both
	 * tiled and linear buffers on cayman. However, async
	 * DMA only supports it on the tiled side. As a result,
	 * the tile order is backwards after an L2T/T2L packet.
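	 * Such copies therefore take the r600_resource_copy_region fallback
	 * below instead of the async DMA path.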
3865 */ 3866 if ((rctx->b.chip_class == CAYMAN) && 3867 (src_mode != dst_mode) && 3868 (util_format_get_blocksize(src->format) >= 16)) { 3869 goto fallback; 3870 } 3871 3872 if (src_mode == dst_mode) { 3873 uint64_t dst_offset, src_offset; 3874 /* simple dma blit would do NOTE code here assume : 3875 * src_box.x/y == 0 3876 * dst_x/y == 0 3877 * dst_pitch == src_pitch 3878 */ 3879 src_offset= rsrc->surface.u.legacy.level[src_level].offset; 3880 src_offset += (uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4 * src_box->z; 3881 src_offset += src_y * src_pitch + src_x * bpp; 3882 dst_offset = rdst->surface.u.legacy.level[dst_level].offset; 3883 dst_offset += (uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4 * dst_z; 3884 dst_offset += dst_y * dst_pitch + dst_x * bpp; 3885 evergreen_dma_copy_buffer(rctx, dst, src, dst_offset, src_offset, 3886 src_box->height * src_pitch); 3887 } else { 3888 evergreen_dma_copy_tile(rctx, dst, dst_level, dst_x, dst_y, dst_z, 3889 src, src_level, src_x, src_y, src_box->z, 3890 copy_height, dst_pitch, bpp); 3891 } 3892 return; 3893 3894 fallback: 3895 r600_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz, 3896 src, src_level, src_box); 3897 } 3898 3899 static void evergreen_set_tess_state(struct pipe_context *ctx, 3900 const float default_outer_level[4], 3901 const float default_inner_level[2]) 3902 { 3903 struct r600_context *rctx = (struct r600_context *)ctx; 3904 3905 memcpy(rctx->tess_state, default_outer_level, sizeof(float) * 4); 3906 memcpy(rctx->tess_state+4, default_inner_level, sizeof(float) * 2); 3907 rctx->driver_consts[PIPE_SHADER_TESS_CTRL].tcs_default_levels_dirty = true; 3908 } 3909 3910 static void evergreen_setup_immed_buffer(struct r600_context *rctx, 3911 struct r600_image_view *rview, 3912 enum pipe_format pformat) 3913 { 3914 struct r600_screen *rscreen = (struct r600_screen *)rctx->b.b.screen; 3915 uint32_t immed_size = rscreen->b.info.max_se * 256 * 64 * util_format_get_blocksize(pformat); 3916 struct eg_buf_res_params buf_params; 3917 bool skip_reloc = false; 3918 struct r600_resource *resource = (struct r600_resource *)rview->base.resource; 3919 if (!resource->immed_buffer) { 3920 eg_resource_alloc_immed(&rscreen->b, resource, immed_size); 3921 } 3922 3923 memset(&buf_params, 0, sizeof(buf_params)); 3924 buf_params.pipe_format = pformat; 3925 buf_params.size = resource->immed_buffer->b.b.width0; 3926 buf_params.swizzle[0] = PIPE_SWIZZLE_X; 3927 buf_params.swizzle[1] = PIPE_SWIZZLE_Y; 3928 buf_params.swizzle[2] = PIPE_SWIZZLE_Z; 3929 buf_params.swizzle[3] = PIPE_SWIZZLE_W; 3930 buf_params.uncached = 1; 3931 evergreen_fill_buffer_resource_words(rctx, &resource->immed_buffer->b.b, 3932 &buf_params, &skip_reloc, 3933 rview->immed_resource_words); 3934 } 3935 3936 static void evergreen_set_hw_atomic_buffers(struct pipe_context *ctx, 3937 unsigned start_slot, 3938 unsigned count, 3939 const struct pipe_shader_buffer *buffers) 3940 { 3941 struct r600_context *rctx = (struct r600_context *)ctx; 3942 struct r600_atomic_buffer_state *astate; 3943 int i, idx; 3944 3945 astate = &rctx->atomic_buffer_state; 3946 3947 /* we'd probably like to expand this to 8 later so put the logic in */ 3948 for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) { 3949 const struct pipe_shader_buffer *buf; 3950 struct pipe_shader_buffer *abuf; 3951 3952 abuf = &astate->buffer[i]; 3953 3954 if (!buffers || !buffers[idx].buffer) { 3955 pipe_resource_reference(&abuf->buffer, NULL); 3956 astate->enabled_mask &= ~(1 
<< i); 3957 continue; 3958 } 3959 buf = &buffers[idx]; 3960 3961 pipe_resource_reference(&abuf->buffer, buf->buffer); 3962 abuf->buffer_offset = buf->buffer_offset; 3963 abuf->buffer_size = buf->buffer_size; 3964 astate->enabled_mask |= (1 << i); 3965 } 3966 } 3967 3968 static void evergreen_set_shader_buffers(struct pipe_context *ctx, 3969 enum pipe_shader_type shader, unsigned start_slot, 3970 unsigned count, 3971 const struct pipe_shader_buffer *buffers) 3972 { 3973 struct r600_context *rctx = (struct r600_context *)ctx; 3974 struct r600_image_state *istate = NULL; 3975 struct r600_image_view *rview; 3976 struct r600_tex_color_info color; 3977 struct eg_buf_res_params buf_params; 3978 struct r600_resource *resource; 3979 int i, idx; 3980 unsigned old_mask; 3981 3982 if (shader != PIPE_SHADER_FRAGMENT && 3983 shader != PIPE_SHADER_COMPUTE && count == 0) 3984 return; 3985 3986 if (shader == PIPE_SHADER_FRAGMENT) 3987 istate = &rctx->fragment_buffers; 3988 else if (shader == PIPE_SHADER_COMPUTE) 3989 istate = &rctx->compute_buffers; 3990 3991 old_mask = istate->enabled_mask; 3992 for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) { 3993 const struct pipe_shader_buffer *buf; 3994 unsigned res_type; 3995 3996 rview = &istate->views[i]; 3997 3998 if (!buffers || !buffers[idx].buffer) { 3999 pipe_resource_reference((struct pipe_resource **)&rview->base.resource, NULL); 4000 istate->enabled_mask &= ~(1 << i); 4001 continue; 4002 } 4003 4004 buf = &buffers[idx]; 4005 pipe_resource_reference((struct pipe_resource **)&rview->base.resource, buf->buffer); 4006 4007 resource = (struct r600_resource *)rview->base.resource; 4008 4009 evergreen_setup_immed_buffer(rctx, rview, PIPE_FORMAT_R32_UINT); 4010 4011 color.offset = 0; 4012 color.view = 0; 4013 evergreen_set_color_surface_buffer(rctx, resource, 4014 PIPE_FORMAT_R32_UINT, 4015 buf->buffer_offset, 4016 buf->buffer_offset + buf->buffer_size, 4017 &color); 4018 4019 res_type = V_028C70_BUFFER; 4020 4021 rview->cb_color_base = color.offset; 4022 rview->cb_color_dim = color.dim; 4023 rview->cb_color_info = color.info | 4024 S_028C70_RAT(1) | 4025 S_028C70_RESOURCE_TYPE(res_type); 4026 rview->cb_color_pitch = color.pitch; 4027 rview->cb_color_slice = color.slice; 4028 rview->cb_color_view = color.view; 4029 rview->cb_color_attrib = color.attrib; 4030 rview->cb_color_fmask = color.fmask; 4031 rview->cb_color_fmask_slice = color.fmask_slice; 4032 4033 memset(&buf_params, 0, sizeof(buf_params)); 4034 buf_params.pipe_format = PIPE_FORMAT_R32_UINT; 4035 buf_params.offset = buf->buffer_offset; 4036 buf_params.size = buf->buffer_size; 4037 buf_params.swizzle[0] = PIPE_SWIZZLE_X; 4038 buf_params.swizzle[1] = PIPE_SWIZZLE_Y; 4039 buf_params.swizzle[2] = PIPE_SWIZZLE_Z; 4040 buf_params.swizzle[3] = PIPE_SWIZZLE_W; 4041 buf_params.force_swizzle = true; 4042 buf_params.uncached = 1; 4043 evergreen_fill_buffer_resource_words(rctx, &resource->b.b, 4044 &buf_params, 4045 &rview->skip_mip_address_reloc, 4046 rview->resource_words); 4047 4048 istate->enabled_mask |= (1 << i); 4049 } 4050 4051 istate->atom.num_dw = util_bitcount(istate->enabled_mask) * 46; 4052 4053 if (old_mask != istate->enabled_mask) 4054 r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom); 4055 4056 /* construct the target mask */ 4057 if (rctx->cb_misc_state.buffer_rat_enabled_mask != istate->enabled_mask) { 4058 rctx->cb_misc_state.buffer_rat_enabled_mask = istate->enabled_mask; 4059 r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom); 4060 } 4061 4062 if (shader == 
PIPE_SHADER_FRAGMENT)
		r600_mark_atom_dirty(rctx, &istate->atom);
}

static void evergreen_set_shader_images(struct pipe_context *ctx,
					enum pipe_shader_type shader, unsigned start_slot,
					unsigned count,
					const struct pipe_image_view *images)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	int i;
	struct r600_image_view *rview;
	struct pipe_resource *image;
	struct r600_resource *resource;
	struct r600_tex_color_info color;
	struct eg_buf_res_params buf_params;
	struct eg_tex_res_params tex_params;
	unsigned old_mask;
	struct r600_image_state *istate = NULL;
	int idx;
	if (shader != PIPE_SHADER_FRAGMENT && shader != PIPE_SHADER_COMPUTE && count == 0)
		return;

	if (shader == PIPE_SHADER_FRAGMENT)
		istate = &rctx->fragment_images;
	else if (shader == PIPE_SHADER_COMPUTE)
		istate = &rctx->compute_images;

	assert (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE);

	old_mask = istate->enabled_mask;
	for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
		unsigned res_type;
		const struct pipe_image_view *iview;
		rview = &istate->views[i];

		if (!images || !images[idx].resource) {
			pipe_resource_reference((struct pipe_resource **)&rview->base.resource, NULL);
			istate->enabled_mask &= ~(1 << i);
			istate->compressed_colortex_mask &= ~(1 << i);
			istate->compressed_depthtex_mask &= ~(1 << i);
			continue;
		}

		iview = &images[idx];
		image = iview->resource;
		resource = (struct r600_resource *)image;

		r600_context_add_resource_size(ctx, image);

		rview->base = *iview;
		rview->base.resource = NULL;
		pipe_resource_reference((struct pipe_resource **)&rview->base.resource, image);

		evergreen_setup_immed_buffer(rctx, rview, iview->format);

		bool is_buffer = image->target == PIPE_BUFFER;
		struct r600_texture *rtex = (struct r600_texture *)image;
		if (!is_buffer && rtex->db_compatible)
			istate->compressed_depthtex_mask |= 1 << i;
		else
			istate->compressed_depthtex_mask &= ~(1 << i);

		if (!is_buffer && rtex->cmask.size)
			istate->compressed_colortex_mask |= 1 << i;
		else
			istate->compressed_colortex_mask &= ~(1 << i);
		if (!is_buffer) {

			evergreen_set_color_surface_common(rctx, rtex,
							   iview->u.tex.level,
							   iview->u.tex.first_layer,
							   iview->u.tex.last_layer,
							   iview->format,
							   &color);
			color.dim = S_028C78_WIDTH_MAX(u_minify(image->width0, iview->u.tex.level) - 1) |
				S_028C78_HEIGHT_MAX(u_minify(image->height0, iview->u.tex.level) - 1);
		} else {
			color.offset = 0;
			color.view = 0;
			evergreen_set_color_surface_buffer(rctx, resource,
							   iview->format,
							   iview->u.buf.offset,
							   iview->u.buf.size,
							   &color);
		}

		switch (image->target) {
		case PIPE_BUFFER:
			res_type = V_028C70_BUFFER;
			break;
		case PIPE_TEXTURE_1D:
			res_type = V_028C70_TEXTURE1D;
			break;
		case PIPE_TEXTURE_1D_ARRAY:
			res_type = V_028C70_TEXTURE1DARRAY;
			break;
		case PIPE_TEXTURE_2D:
		case PIPE_TEXTURE_RECT:
			res_type = V_028C70_TEXTURE2D;
			break;
		case PIPE_TEXTURE_3D:
			res_type = V_028C70_TEXTURE3D;
			break;
		case PIPE_TEXTURE_2D_ARRAY:
		case PIPE_TEXTURE_CUBE:
		case PIPE_TEXTURE_CUBE_ARRAY:
			res_type = V_028C70_TEXTURE2DARRAY;
			break;
		default:
			assert(0);
			res_type = 0;
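			/* Added note: cube and cube-array images land in the
			 * TEXTURE2DARRAY case above because RATs address
			 * layered surfaces as plain 2D arrays (a cube being
			 * six layers). */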
			break;
		}

		rview->cb_color_base = color.offset;
		rview->cb_color_dim = color.dim;
		rview->cb_color_info = color.info |
			S_028C70_RAT(1) |
			S_028C70_RESOURCE_TYPE(res_type);
		rview->cb_color_pitch = color.pitch;
		rview->cb_color_slice = color.slice;
		rview->cb_color_view = color.view;
		rview->cb_color_attrib = color.attrib;
		rview->cb_color_fmask = color.fmask;
		rview->cb_color_fmask_slice = color.fmask_slice;

		if (image->target != PIPE_BUFFER) {
			memset(&tex_params, 0, sizeof(tex_params));
			tex_params.pipe_format = iview->format;
			tex_params.force_level = 0;
			tex_params.width0 = image->width0;
			tex_params.height0 = image->height0;
			tex_params.first_level = iview->u.tex.level;
			tex_params.last_level = iview->u.tex.level;
			tex_params.first_layer = iview->u.tex.first_layer;
			tex_params.last_layer = iview->u.tex.last_layer;
			tex_params.target = image->target;
			tex_params.swizzle[0] = PIPE_SWIZZLE_X;
			tex_params.swizzle[1] = PIPE_SWIZZLE_Y;
			tex_params.swizzle[2] = PIPE_SWIZZLE_Z;
			tex_params.swizzle[3] = PIPE_SWIZZLE_W;
			evergreen_fill_tex_resource_words(rctx, &resource->b.b, &tex_params,
							  &rview->skip_mip_address_reloc,
							  rview->resource_words);

		} else {
			memset(&buf_params, 0, sizeof(buf_params));
			buf_params.pipe_format = iview->format;
			buf_params.size = iview->u.buf.size;
			buf_params.offset = iview->u.buf.offset;
			buf_params.swizzle[0] = PIPE_SWIZZLE_X;
			buf_params.swizzle[1] = PIPE_SWIZZLE_Y;
			buf_params.swizzle[2] = PIPE_SWIZZLE_Z;
			buf_params.swizzle[3] = PIPE_SWIZZLE_W;
			evergreen_fill_buffer_resource_words(rctx, &resource->b.b,
							     &buf_params,
							     &rview->skip_mip_address_reloc,
							     rview->resource_words);
		}
		istate->enabled_mask |= (1 << i);
	}

	istate->atom.num_dw = util_bitcount(istate->enabled_mask) * 46;
	istate->dirty_buffer_constants = TRUE;
	rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
	rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB |
		R600_CONTEXT_FLUSH_AND_INV_CB_META;

	if (old_mask != istate->enabled_mask)
		r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);

	if (rctx->cb_misc_state.image_rat_enabled_mask != istate->enabled_mask) {
		rctx->cb_misc_state.image_rat_enabled_mask = istate->enabled_mask;
		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}

	if (shader == PIPE_SHADER_FRAGMENT)
		r600_mark_atom_dirty(rctx, &istate->atom);
}

void evergreen_init_state_functions(struct r600_context *rctx)
{
	unsigned id = 1;
	unsigned i;
	/* !!!
	 * To avoid GPU lockups, registers must be emitted in a specific order
	 * (no kidding ...). The order below is important and has been
	 * partially inferred from analyzing the fglrx command stream.
	 *
	 * Don't reorder atoms without carefully checking the effect (GPU lockup
	 * or piglit regression).
	 * !!!
4255 */ 4256 if (rctx->b.chip_class == EVERGREEN) { 4257 r600_init_atom(rctx, &rctx->config_state.atom, id++, evergreen_emit_config_state, 11); 4258 rctx->config_state.dyn_gpr_enabled = true; 4259 } 4260 r600_init_atom(rctx, &rctx->framebuffer.atom, id++, evergreen_emit_framebuffer_state, 0); 4261 r600_init_atom(rctx, &rctx->fragment_images.atom, id++, evergreen_emit_fragment_image_state, 0); 4262 r600_init_atom(rctx, &rctx->compute_images.atom, id++, evergreen_emit_compute_image_state, 0); 4263 r600_init_atom(rctx, &rctx->fragment_buffers.atom, id++, evergreen_emit_fragment_buffer_state, 0); 4264 r600_init_atom(rctx, &rctx->compute_buffers.atom, id++, evergreen_emit_compute_buffer_state, 0); 4265 /* shader const */ 4266 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX].atom, id++, evergreen_emit_vs_constant_buffers, 0); 4267 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY].atom, id++, evergreen_emit_gs_constant_buffers, 0); 4268 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT].atom, id++, evergreen_emit_ps_constant_buffers, 0); 4269 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_TESS_CTRL].atom, id++, evergreen_emit_tcs_constant_buffers, 0); 4270 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_TESS_EVAL].atom, id++, evergreen_emit_tes_constant_buffers, 0); 4271 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_COMPUTE].atom, id++, evergreen_emit_cs_constant_buffers, 0); 4272 /* shader program */ 4273 r600_init_atom(rctx, &rctx->cs_shader_state.atom, id++, evergreen_emit_cs_shader, 0); 4274 /* sampler */ 4275 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].states.atom, id++, evergreen_emit_vs_sampler_states, 0); 4276 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].states.atom, id++, evergreen_emit_gs_sampler_states, 0); 4277 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_TESS_CTRL].states.atom, id++, evergreen_emit_tcs_sampler_states, 0); 4278 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_TESS_EVAL].states.atom, id++, evergreen_emit_tes_sampler_states, 0); 4279 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].states.atom, id++, evergreen_emit_ps_sampler_states, 0); 4280 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].states.atom, id++, evergreen_emit_cs_sampler_states, 0); 4281 /* resources */ 4282 r600_init_atom(rctx, &rctx->vertex_buffer_state.atom, id++, evergreen_fs_emit_vertex_buffers, 0); 4283 r600_init_atom(rctx, &rctx->cs_vertex_buffer_state.atom, id++, evergreen_cs_emit_vertex_buffers, 0); 4284 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views.atom, id++, evergreen_emit_vs_sampler_views, 0); 4285 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views.atom, id++, evergreen_emit_gs_sampler_views, 0); 4286 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_TESS_CTRL].views.atom, id++, evergreen_emit_tcs_sampler_views, 0); 4287 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_TESS_EVAL].views.atom, id++, evergreen_emit_tes_sampler_views, 0); 4288 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views.atom, id++, evergreen_emit_ps_sampler_views, 0); 4289 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].views.atom, id++, evergreen_emit_cs_sampler_views, 0); 4290 4291 r600_init_atom(rctx, &rctx->vgt_state.atom, id++, r600_emit_vgt_state, 10); 4292 4293 if (rctx->b.chip_class == EVERGREEN) { 4294 r600_init_atom(rctx, &rctx->sample_mask.atom, id++, evergreen_emit_sample_mask, 3); 4295 } else { 4296 r600_init_atom(rctx, 
&rctx->sample_mask.atom, id++, cayman_emit_sample_mask, 4); 4297 } 4298 rctx->sample_mask.sample_mask = ~0; 4299 4300 r600_init_atom(rctx, &rctx->alphatest_state.atom, id++, r600_emit_alphatest_state, 6); 4301 r600_init_atom(rctx, &rctx->blend_color.atom, id++, r600_emit_blend_color, 6); 4302 r600_init_atom(rctx, &rctx->blend_state.atom, id++, r600_emit_cso_state, 0); 4303 r600_init_atom(rctx, &rctx->cb_misc_state.atom, id++, evergreen_emit_cb_misc_state, 4); 4304 r600_init_atom(rctx, &rctx->clip_misc_state.atom, id++, r600_emit_clip_misc_state, 9); 4305 r600_init_atom(rctx, &rctx->clip_state.atom, id++, evergreen_emit_clip_state, 26); 4306 r600_init_atom(rctx, &rctx->db_misc_state.atom, id++, evergreen_emit_db_misc_state, 10); 4307 r600_init_atom(rctx, &rctx->db_state.atom, id++, evergreen_emit_db_state, 14); 4308 r600_init_atom(rctx, &rctx->dsa_state.atom, id++, r600_emit_cso_state, 0); 4309 r600_init_atom(rctx, &rctx->poly_offset_state.atom, id++, evergreen_emit_polygon_offset, 9); 4310 r600_init_atom(rctx, &rctx->rasterizer_state.atom, id++, r600_emit_cso_state, 0); 4311 r600_add_atom(rctx, &rctx->b.scissors.atom, id++); 4312 r600_add_atom(rctx, &rctx->b.viewports.atom, id++); 4313 r600_init_atom(rctx, &rctx->stencil_ref.atom, id++, r600_emit_stencil_ref, 4); 4314 r600_init_atom(rctx, &rctx->vertex_fetch_shader.atom, id++, evergreen_emit_vertex_fetch_shader, 5); 4315 r600_add_atom(rctx, &rctx->b.render_cond_atom, id++); 4316 r600_add_atom(rctx, &rctx->b.streamout.begin_atom, id++); 4317 r600_add_atom(rctx, &rctx->b.streamout.enable_atom, id++); 4318 for (i = 0; i < EG_NUM_HW_STAGES; i++) 4319 r600_init_atom(rctx, &rctx->hw_shader_stages[i].atom, id++, r600_emit_shader, 0); 4320 r600_init_atom(rctx, &rctx->shader_stages.atom, id++, evergreen_emit_shader_stages, 15); 4321 r600_init_atom(rctx, &rctx->gs_rings.atom, id++, evergreen_emit_gs_rings, 26); 4322 4323 rctx->b.b.create_blend_state = evergreen_create_blend_state; 4324 rctx->b.b.create_depth_stencil_alpha_state = evergreen_create_dsa_state; 4325 rctx->b.b.create_rasterizer_state = evergreen_create_rs_state; 4326 rctx->b.b.create_sampler_state = evergreen_create_sampler_state; 4327 rctx->b.b.create_sampler_view = evergreen_create_sampler_view; 4328 rctx->b.b.set_framebuffer_state = evergreen_set_framebuffer_state; 4329 rctx->b.b.set_polygon_stipple = evergreen_set_polygon_stipple; 4330 rctx->b.b.set_min_samples = evergreen_set_min_samples; 4331 rctx->b.b.set_tess_state = evergreen_set_tess_state; 4332 rctx->b.b.set_hw_atomic_buffers = evergreen_set_hw_atomic_buffers; 4333 rctx->b.b.set_shader_images = evergreen_set_shader_images; 4334 rctx->b.b.set_shader_buffers = evergreen_set_shader_buffers; 4335 if (rctx->b.chip_class == EVERGREEN) 4336 rctx->b.b.get_sample_position = evergreen_get_sample_position; 4337 else 4338 rctx->b.b.get_sample_position = cayman_get_sample_position; 4339 rctx->b.dma_copy = evergreen_dma_copy; 4340 4341 evergreen_init_compute_state_functions(rctx); 4342 } 4343 4344 /** 4345 * This calculates the LDS size for tessellation shaders (VS, TCS, TES). 4346 * 4347 * The information about LDS and other non-compile-time parameters is then 4348 * written to the const buffer. 
4349 4350 * const buffer contains - 4351 * uint32_t input_patch_size 4352 * uint32_t input_vertex_size 4353 * uint32_t num_tcs_input_cp 4354 * uint32_t num_tcs_output_cp; 4355 * uint32_t output_patch_size 4356 * uint32_t output_vertex_size 4357 * uint32_t output_patch0_offset 4358 * uint32_t perpatch_output_offset 4359 * and the same constbuf is bound to LS/HS/VS(ES). 4360 */ 4361 void evergreen_setup_tess_constants(struct r600_context *rctx, const struct pipe_draw_info *info, unsigned *num_patches) 4362 { 4363 struct pipe_constant_buffer constbuf = {0}; 4364 struct r600_pipe_shader_selector *tcs = rctx->tcs_shader ? rctx->tcs_shader : rctx->tes_shader; 4365 struct r600_pipe_shader_selector *ls = rctx->vs_shader; 4366 unsigned num_tcs_input_cp = info->vertices_per_patch; 4367 unsigned num_tcs_outputs; 4368 unsigned num_tcs_output_cp; 4369 unsigned num_tcs_patch_outputs; 4370 unsigned num_tcs_inputs; 4371 unsigned input_vertex_size, output_vertex_size; 4372 unsigned input_patch_size, pervertex_output_patch_size, output_patch_size; 4373 unsigned output_patch0_offset, perpatch_output_offset, lds_size; 4374 uint32_t values[8]; 4375 unsigned num_waves; 4376 unsigned num_pipes = rctx->screen->b.info.r600_max_quad_pipes; 4377 unsigned wave_divisor = (16 * num_pipes); 4378 4379 *num_patches = 1; 4380 4381 if (!rctx->tes_shader) { 4382 rctx->lds_alloc = 0; 4383 rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX, 4384 R600_LDS_INFO_CONST_BUFFER, NULL); 4385 rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_CTRL, 4386 R600_LDS_INFO_CONST_BUFFER, NULL); 4387 rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL, 4388 R600_LDS_INFO_CONST_BUFFER, NULL); 4389 return; 4390 } 4391 4392 if (rctx->lds_alloc != 0 && 4393 rctx->last_ls == ls && 4394 rctx->last_num_tcs_input_cp == num_tcs_input_cp && 4395 rctx->last_tcs == tcs) 4396 return; 4397 4398 num_tcs_inputs = util_last_bit64(ls->lds_outputs_written_mask); 4399 4400 if (rctx->tcs_shader) { 4401 num_tcs_outputs = util_last_bit64(tcs->lds_outputs_written_mask); 4402 num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT]; 4403 num_tcs_patch_outputs = util_last_bit64(tcs->lds_patch_outputs_written_mask); 4404 } else { 4405 num_tcs_outputs = num_tcs_inputs; 4406 num_tcs_output_cp = num_tcs_input_cp; 4407 num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */ 4408 } 4409 4410 /* size in bytes */ 4411 input_vertex_size = num_tcs_inputs * 16; 4412 output_vertex_size = num_tcs_outputs * 16; 4413 4414 input_patch_size = num_tcs_input_cp * input_vertex_size; 4415 4416 pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size; 4417 output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16; 4418 4419 output_patch0_offset = rctx->tcs_shader ? 
input_patch_size * *num_patches : 0; 4420 perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size; 4421 4422 lds_size = output_patch0_offset + output_patch_size * *num_patches; 4423 4424 values[0] = input_patch_size; 4425 values[1] = input_vertex_size; 4426 values[2] = num_tcs_input_cp; 4427 values[3] = num_tcs_output_cp; 4428 4429 values[4] = output_patch_size; 4430 values[5] = output_vertex_size; 4431 values[6] = output_patch0_offset; 4432 values[7] = perpatch_output_offset; 4433 4434 /* docs say HS_NUM_WAVES - CEIL((LS_HS_CONFIG.NUM_PATCHES * 4435 LS_HS_CONFIG.HS_NUM_OUTPUT_CP) / (NUM_GOOD_PIPES * 16)) */ 4436 num_waves = ceilf((float)(*num_patches * num_tcs_output_cp) / (float)wave_divisor); 4437 4438 rctx->lds_alloc = (lds_size | (num_waves << 14)); 4439 4440 rctx->last_ls = ls; 4441 rctx->last_tcs = tcs; 4442 rctx->last_num_tcs_input_cp = num_tcs_input_cp; 4443 4444 constbuf.user_buffer = values; 4445 constbuf.buffer_size = 8 * 4; 4446 4447 rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX, 4448 R600_LDS_INFO_CONST_BUFFER, &constbuf); 4449 rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_CTRL, 4450 R600_LDS_INFO_CONST_BUFFER, &constbuf); 4451 rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL, 4452 R600_LDS_INFO_CONST_BUFFER, &constbuf); 4453 pipe_resource_reference(&constbuf.buffer, NULL); 4454 } 4455 4456 uint32_t evergreen_get_ls_hs_config(struct r600_context *rctx, 4457 const struct pipe_draw_info *info, 4458 unsigned num_patches) 4459 { 4460 unsigned num_output_cp; 4461 4462 if (!rctx->tes_shader) 4463 return 0; 4464 4465 num_output_cp = rctx->tcs_shader ? 4466 rctx->tcs_shader->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT] : 4467 info->vertices_per_patch; 4468 4469 return S_028B58_NUM_PATCHES(num_patches) | 4470 S_028B58_HS_NUM_INPUT_CP(info->vertices_per_patch) | 4471 S_028B58_HS_NUM_OUTPUT_CP(num_output_cp); 4472 } 4473 4474 void evergreen_set_ls_hs_config(struct r600_context *rctx, 4475 struct radeon_winsys_cs *cs, 4476 uint32_t ls_hs_config) 4477 { 4478 radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config); 4479 } 4480 4481 void evergreen_set_lds_alloc(struct r600_context *rctx, 4482 struct radeon_winsys_cs *cs, 4483 uint32_t lds_alloc) 4484 { 4485 radeon_set_context_reg(cs, R_0288E8_SQ_LDS_ALLOC, lds_alloc); 4486 } 4487 4488 /* on evergreen if you are running tessellation you need to disable dynamic 4489 GPRs to workaround a hardware bug.*/ 4490 bool evergreen_adjust_gprs(struct r600_context *rctx) 4491 { 4492 unsigned num_gprs[EG_NUM_HW_STAGES]; 4493 unsigned def_gprs[EG_NUM_HW_STAGES]; 4494 unsigned cur_gprs[EG_NUM_HW_STAGES]; 4495 unsigned new_gprs[EG_NUM_HW_STAGES]; 4496 unsigned def_num_clause_temp_gprs = rctx->r6xx_num_clause_temp_gprs; 4497 unsigned max_gprs; 4498 unsigned i; 4499 unsigned total_gprs; 4500 unsigned tmp[3]; 4501 bool rework = false, set_default = false, set_dirty = false; 4502 max_gprs = 0; 4503 for (i = 0; i < EG_NUM_HW_STAGES; i++) { 4504 def_gprs[i] = rctx->default_gprs[i]; 4505 max_gprs += def_gprs[i]; 4506 } 4507 max_gprs += def_num_clause_temp_gprs * 2; 4508 4509 /* if we have no TESS and dyn gpr is enabled then do nothing. 
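	 * Otherwise tessellation was just disabled, so switch dynamic GPRs
	 * back on, mark the config atom dirty, and wait for 3D idle (see the
	 * code below).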

/* On evergreen, if tessellation is running, dynamic GPRs must be disabled
 * to work around a hardware bug.
 */
bool evergreen_adjust_gprs(struct r600_context *rctx)
{
	unsigned num_gprs[EG_NUM_HW_STAGES];
	unsigned def_gprs[EG_NUM_HW_STAGES];
	unsigned cur_gprs[EG_NUM_HW_STAGES];
	unsigned new_gprs[EG_NUM_HW_STAGES];
	unsigned def_num_clause_temp_gprs = rctx->r6xx_num_clause_temp_gprs;
	unsigned max_gprs;
	unsigned i;
	unsigned total_gprs;
	unsigned tmp[3];
	bool rework = false, set_default = false, set_dirty = false;

	max_gprs = 0;
	for (i = 0; i < EG_NUM_HW_STAGES; i++) {
		def_gprs[i] = rctx->default_gprs[i];
		max_gprs += def_gprs[i];
	}
	max_gprs += def_num_clause_temp_gprs * 2;

	/* If we have no TESS and dynamic GPRs are already enabled, there is
	 * nothing to do; otherwise transition back to the dyn-gpr state. */
	if (!rctx->hw_shader_stages[EG_HW_STAGE_HS].shader) {
		if (rctx->config_state.dyn_gpr_enabled)
			return true;

		rctx->config_state.dyn_gpr_enabled = true;
		r600_mark_atom_dirty(rctx, &rctx->config_state.atom);
		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
		return true;
	}

	/* gather required shader gprs */
	for (i = 0; i < EG_NUM_HW_STAGES; i++) {
		if (rctx->hw_shader_stages[i].shader)
			num_gprs[i] = rctx->hw_shader_stages[i].shader->shader.bc.ngpr;
		else
			num_gprs[i] = 0;
	}

	cur_gprs[R600_HW_STAGE_PS] = G_008C04_NUM_PS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
	cur_gprs[R600_HW_STAGE_VS] = G_008C04_NUM_VS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
	cur_gprs[R600_HW_STAGE_GS] = G_008C08_NUM_GS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
	cur_gprs[R600_HW_STAGE_ES] = G_008C08_NUM_ES_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
	cur_gprs[EG_HW_STAGE_LS] = G_008C0C_NUM_LS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_3);
	cur_gprs[EG_HW_STAGE_HS] = G_008C0C_NUM_HS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_3);

	total_gprs = 0;
	for (i = 0; i < EG_NUM_HW_STAGES; i++) {
		new_gprs[i] = num_gprs[i];
		total_gprs += num_gprs[i];
	}

	if (total_gprs > (max_gprs - (2 * def_num_clause_temp_gprs)))
		return false;

	for (i = 0; i < EG_NUM_HW_STAGES; i++) {
		if (new_gprs[i] > cur_gprs[i]) {
			rework = true;
			break;
		}
	}

	if (rctx->config_state.dyn_gpr_enabled) {
		set_dirty = true;
		rctx->config_state.dyn_gpr_enabled = false;
	}

	if (rework) {
		set_default = true;
		for (i = 0; i < EG_NUM_HW_STAGES; i++) {
			if (new_gprs[i] > def_gprs[i])
				set_default = false;
		}

		if (set_default) {
			for (i = 0; i < EG_NUM_HW_STAGES; i++) {
				new_gprs[i] = def_gprs[i];
			}
		} else {
			/* give the pixel shader whatever is left over */
			unsigned ps_value = max_gprs;

			ps_value -= (def_num_clause_temp_gprs * 2);
			for (i = R600_HW_STAGE_VS; i < EG_NUM_HW_STAGES; i++)
				ps_value -= new_gprs[i];

			new_gprs[R600_HW_STAGE_PS] = ps_value;
		}

		tmp[0] = S_008C04_NUM_PS_GPRS(new_gprs[R600_HW_STAGE_PS]) |
			 S_008C04_NUM_VS_GPRS(new_gprs[R600_HW_STAGE_VS]) |
			 S_008C04_NUM_CLAUSE_TEMP_GPRS(def_num_clause_temp_gprs);

		tmp[1] = S_008C08_NUM_ES_GPRS(new_gprs[R600_HW_STAGE_ES]) |
			 S_008C08_NUM_GS_GPRS(new_gprs[R600_HW_STAGE_GS]);

		tmp[2] = S_008C0C_NUM_HS_GPRS(new_gprs[EG_HW_STAGE_HS]) |
			 S_008C0C_NUM_LS_GPRS(new_gprs[EG_HW_STAGE_LS]);

		if (rctx->config_state.sq_gpr_resource_mgmt_1 != tmp[0] ||
		    rctx->config_state.sq_gpr_resource_mgmt_2 != tmp[1] ||
		    rctx->config_state.sq_gpr_resource_mgmt_3 != tmp[2]) {
			rctx->config_state.sq_gpr_resource_mgmt_1 = tmp[0];
			rctx->config_state.sq_gpr_resource_mgmt_2 = tmp[1];
			rctx->config_state.sq_gpr_resource_mgmt_3 = tmp[2];
			set_dirty = true;
		}
	}

	if (set_dirty) {
		r600_mark_atom_dirty(rctx, &rctx->config_state.atom);
		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
	}
	return true;
}

#define AC_ENCODE_TRACE_POINT(id)	(0xcafe0000 | ((id) & 0xffff))
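
/* For example, trace id 0x1234 is encoded as 0xcafe1234; the fixed 0xcafe
 * prefix makes trace points easy to spot when scanning a captured command
 * stream.
 */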

void eg_trace_emit(struct r600_context *rctx)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	unsigned reloc;

	if (rctx->b.chip_class < EVERGREEN)
		return;

	/* This must be done after r600_need_cs_space. */
	reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
					  (struct r600_resource*)rctx->trace_buf, RADEON_USAGE_WRITE,
					  RADEON_PRIO_CP_DMA);

	rctx->trace_id++;
	radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rctx->trace_buf,
				  RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
	radeon_emit(cs, PKT3(PKT3_MEM_WRITE, 3, 0));
	radeon_emit(cs, rctx->trace_buf->gpu_address);
	radeon_emit(cs, rctx->trace_buf->gpu_address >> 32 | MEM_WRITE_32_BITS |
		    MEM_WRITE_CONFIRM);
	radeon_emit(cs, rctx->trace_id);
	radeon_emit(cs, 0);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, reloc);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, AC_ENCODE_TRACE_POINT(rctx->trace_id));
}

static void evergreen_emit_set_append_cnt(struct r600_context *rctx,
					  struct r600_shader_atomic *atomic,
					  struct r600_resource *resource,
					  uint32_t pkt_flags)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	unsigned reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
						   resource,
						   RADEON_USAGE_READ,
						   RADEON_PRIO_SHADER_RW_BUFFER);
	uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);
	uint32_t base_reg_0 = R_02872C_GDS_APPEND_COUNT_0;

	uint32_t reg_val = (base_reg_0 + atomic->hw_idx * 4 - EVERGREEN_CONTEXT_REG_OFFSET) >> 2;

	radeon_emit(cs, PKT3(PKT3_SET_APPEND_CNT, 2, 0) | pkt_flags);
	radeon_emit(cs, (reg_val << 16) | 0x3);
	radeon_emit(cs, dst_offset & 0xfffffffc);
	radeon_emit(cs, (dst_offset >> 32) & 0xff);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, reloc);
}
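
/* Worked example for the reg_val computation above, assuming the usual
 * EVERGREEN_CONTEXT_REG_OFFSET of 0x28000: for hw_idx 0 this gives
 * (0x2872c + 0 - 0x28000) >> 2 = 0x1cb, and each successive counter
 * advances the packed register index by one dword.
 */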

static void evergreen_emit_event_write_eos(struct r600_context *rctx,
					   struct r600_shader_atomic *atomic,
					   struct r600_resource *resource,
					   uint32_t pkt_flags)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	uint32_t event = EVENT_TYPE_PS_DONE;
	uint32_t base_reg_0 = R_02872C_GDS_APPEND_COUNT_0;
	uint32_t reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
						   resource,
						   RADEON_USAGE_WRITE,
						   RADEON_PRIO_SHADER_RW_BUFFER);
	uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);
	uint32_t reg_val = (base_reg_0 + atomic->hw_idx * 4) >> 2;

	if (pkt_flags == RADEON_CP_PACKET3_COMPUTE_MODE)
		event = EVENT_TYPE_CS_DONE;

	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOS, 3, 0) | pkt_flags);
	radeon_emit(cs, EVENT_TYPE(event) | EVENT_INDEX(6));
	radeon_emit(cs, (dst_offset) & 0xffffffff);
	radeon_emit(cs, (0 << 29) | ((dst_offset >> 32) & 0xff));
	radeon_emit(cs, reg_val);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, reloc);
}

static void cayman_emit_event_write_eos(struct r600_context *rctx,
					struct r600_shader_atomic *atomic,
					struct r600_resource *resource,
					uint32_t pkt_flags)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	uint32_t event = EVENT_TYPE_PS_DONE;
	uint32_t reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
						   resource,
						   RADEON_USAGE_WRITE,
						   RADEON_PRIO_SHADER_RW_BUFFER);
	uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);

	if (pkt_flags == RADEON_CP_PACKET3_COMPUTE_MODE)
		event = EVENT_TYPE_CS_DONE;

	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOS, 3, 0) | pkt_flags);
	radeon_emit(cs, EVENT_TYPE(event) | EVENT_INDEX(6));
	radeon_emit(cs, (dst_offset) & 0xffffffff);
	radeon_emit(cs, (1 << 29) | ((dst_offset >> 32) & 0xff));
	radeon_emit(cs, (atomic->hw_idx) | (1 << 16));
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, reloc);
}

/* writes count from a buffer into GDS */
static void cayman_write_count_to_gds(struct r600_context *rctx,
				      struct r600_shader_atomic *atomic,
				      struct r600_resource *resource,
				      uint32_t pkt_flags)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	unsigned reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
						   resource,
						   RADEON_USAGE_READ,
						   RADEON_PRIO_SHADER_RW_BUFFER);
	uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);

	radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0) | pkt_flags);
	radeon_emit(cs, dst_offset & 0xffffffff);
	radeon_emit(cs, PKT3_CP_DMA_CP_SYNC | PKT3_CP_DMA_DST_SEL(1) | /* GDS */
		    ((dst_offset >> 32) & 0xff));
	radeon_emit(cs, atomic->hw_idx * 4);
	radeon_emit(cs, 0);
	radeon_emit(cs, PKT3_CP_DMA_CMD_DAS | 4);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, reloc);
}

bool evergreen_emit_atomic_buffer_setup(struct r600_context *rctx,
					struct r600_pipe_shader *cs_shader,
					struct r600_shader_atomic *combined_atomics,
					uint8_t *atomic_used_mask_p)
{
	struct r600_atomic_buffer_state *astate = &rctx->atomic_buffer_state;
	unsigned pkt_flags = 0;
	uint8_t atomic_used_mask = 0;
	int i, j, k;
	bool is_compute = cs_shader ? true : false;

	if (is_compute)
		pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;

	for (i = 0; i < (is_compute ? 1 : EG_NUM_HW_STAGES); i++) {
		uint8_t num_atomic_stage;
		struct r600_pipe_shader *pshader;

		if (is_compute)
			pshader = cs_shader;
		else
			pshader = rctx->hw_shader_stages[i].shader;
		if (!pshader)
			continue;

		num_atomic_stage = pshader->shader.nhwatomic_ranges;
		if (!num_atomic_stage)
			continue;

		for (j = 0; j < num_atomic_stage; j++) {
			struct r600_shader_atomic *atomic = &pshader->shader.atomics[j];
			int natomics = atomic->end - atomic->start + 1;

			for (k = 0; k < natomics; k++) {
				/* seen this in a previous stage */
				if (atomic_used_mask & (1u << (atomic->hw_idx + k)))
					continue;

				combined_atomics[atomic->hw_idx + k].hw_idx = atomic->hw_idx + k;
				combined_atomics[atomic->hw_idx + k].buffer_id = atomic->buffer_id;
				combined_atomics[atomic->hw_idx + k].start = atomic->start + k;
				combined_atomics[atomic->hw_idx + k].end = combined_atomics[atomic->hw_idx + k].start + 1;
				atomic_used_mask |= (1u << (atomic->hw_idx + k));
			}
		}
	}

	uint32_t mask = atomic_used_mask;
	while (mask) {
		unsigned atomic_index = u_bit_scan(&mask);
		struct r600_shader_atomic *atomic = &combined_atomics[atomic_index];
		struct r600_resource *resource = r600_resource(astate->buffer[atomic->buffer_id].buffer);
		assert(resource);

		if (rctx->b.chip_class == CAYMAN)
			cayman_write_count_to_gds(rctx, atomic, resource, pkt_flags);
		else
			evergreen_emit_set_append_cnt(rctx, atomic, resource, pkt_flags);
	}
	*atomic_used_mask_p = atomic_used_mask;
	return true;
}
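
/* A sketch of the intended setup/save pairing around a draw or dispatch;
 * the array size and surrounding code here are assumptions for illustration
 * (the real call sites size combined_atomics to the hardware's atomic
 * counter capacity):
 *
 *	struct r600_shader_atomic combined_atomics[8];
 *	uint8_t atomic_used_mask;
 *
 *	evergreen_emit_atomic_buffer_setup(rctx, NULL, combined_atomics,
 *					   &atomic_used_mask);
 *	... emit the draw packets ...
 *	evergreen_emit_atomic_buffer_save(rctx, false, combined_atomics,
 *					  &atomic_used_mask);
 */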

void evergreen_emit_atomic_buffer_save(struct r600_context *rctx,
				       bool is_compute,
				       struct r600_shader_atomic *combined_atomics,
				       uint8_t *atomic_used_mask_p)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_atomic_buffer_state *astate = &rctx->atomic_buffer_state;
	uint32_t pkt_flags = 0;
	uint32_t event = EVENT_TYPE_PS_DONE;
	uint32_t mask = *atomic_used_mask_p;
	uint64_t dst_offset;
	unsigned reloc;

	if (is_compute)
		pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;

	if (!mask)
		return;

	while (mask) {
		unsigned atomic_index = u_bit_scan(&mask);
		struct r600_shader_atomic *atomic = &combined_atomics[atomic_index];
		struct r600_resource *resource = r600_resource(astate->buffer[atomic->buffer_id].buffer);
		assert(resource);

		if (rctx->b.chip_class == CAYMAN)
			cayman_emit_event_write_eos(rctx, atomic, resource, pkt_flags);
		else
			evergreen_emit_event_write_eos(rctx, atomic, resource, pkt_flags);
	}

	if (pkt_flags == RADEON_CP_PACKET3_COMPUTE_MODE)
		event = EVENT_TYPE_CS_DONE;

	++rctx->append_fence_id;
	reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
					  r600_resource(rctx->append_fence),
					  RADEON_USAGE_READWRITE,
					  RADEON_PRIO_SHADER_RW_BUFFER);
	dst_offset = r600_resource(rctx->append_fence)->gpu_address;
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOS, 3, 0) | pkt_flags);
	radeon_emit(cs, EVENT_TYPE(event) | EVENT_INDEX(6));
	radeon_emit(cs, dst_offset & 0xffffffff);
	radeon_emit(cs, (2 << 29) | ((dst_offset >> 32) & 0xff));
	radeon_emit(cs, rctx->append_fence_id);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, reloc);

	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0) | pkt_flags);
	radeon_emit(cs, WAIT_REG_MEM_GEQUAL | WAIT_REG_MEM_MEMORY | (1 << 8));
	radeon_emit(cs, dst_offset & 0xffffffff);
	radeon_emit(cs, ((dst_offset >> 32) & 0xff));
	radeon_emit(cs, rctx->append_fence_id);
	radeon_emit(cs, 0xffffffff);
	radeon_emit(cs, 0xa);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, reloc);
}
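
/* Note on the save path above: the PS_DONE/CS_DONE EOS events are pipelined,
 * so the function finishes by writing an incrementing fence value
 * (append_fence_id) with one more EOS and then emitting WAIT_REG_MEM
 * (GEQUAL against memory) on that fence, stalling the CP until every
 * counter value has actually landed in its buffer.
 */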