1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */ 2 3 /* 4 * Copyright (C) 2014 Rob Clark <robclark (at) freedesktop.org> 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice (including the next 14 * paragraph) shall be included in all copies or substantial portions of the 15 * Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 * SOFTWARE. 24 * 25 * Authors: 26 * Rob Clark <robclark (at) freedesktop.org> 27 */ 28 29 #include "pipe/p_state.h" 30 #include "util/u_string.h" 31 #include "util/u_memory.h" 32 #include "util/u_inlines.h" 33 #include "util/u_format.h" 34 #include "tgsi/tgsi_dump.h" 35 #include "tgsi/tgsi_parse.h" 36 37 #include "freedreno_context.h" 38 #include "freedreno_util.h" 39 40 #include "ir3_shader.h" 41 #include "ir3_compiler.h" 42 #include "ir3_nir.h" 43 44 int 45 ir3_glsl_type_size(const struct glsl_type *type) 46 { 47 return glsl_count_attribute_slots(type, false); 48 } 49 50 static void 51 delete_variant(struct ir3_shader_variant *v) 52 { 53 if (v->ir) 54 ir3_destroy(v->ir); 55 if (v->bo) 56 fd_bo_del(v->bo); 57 free(v); 58 } 59 60 /* for vertex shader, the inputs are loaded into registers before the shader 61 * is executed, so max_regs from the shader instructions might not properly 62 * reflect the # of registers actually used, especially in case passthrough 63 * varyings. 64 * 65 * Likewise, for fragment shader, we can have some regs which are passed 66 * input values but never touched by the resulting shader (ie. as result 67 * of dead code elimination or simply because we don't know how to turn 68 * the reg off. 69 */ 70 static void 71 fixup_regfootprint(struct ir3_shader_variant *v) 72 { 73 if (v->type == SHADER_VERTEX) { 74 unsigned i; 75 for (i = 0; i < v->inputs_count; i++) { 76 /* skip frag inputs fetch via bary.f since their reg's are 77 * not written by gpu before shader starts (and in fact the 78 * regid's might not even be valid) 79 */ 80 if (v->inputs[i].bary) 81 continue; 82 83 if (v->inputs[i].compmask) { 84 int32_t regid = (v->inputs[i].regid + 3) >> 2; 85 v->info.max_reg = MAX2(v->info.max_reg, regid); 86 } 87 } 88 for (i = 0; i < v->outputs_count; i++) { 89 int32_t regid = (v->outputs[i].regid + 3) >> 2; 90 v->info.max_reg = MAX2(v->info.max_reg, regid); 91 } 92 } else if (v->type == SHADER_FRAGMENT) { 93 /* NOTE: not sure how to turn pos_regid off.. but this could 94 * be, for example, r1.x while max reg used by the shader is 95 * r0.*, in which case we need to fixup the reg footprint: 96 */ 97 v->info.max_reg = MAX2(v->info.max_reg, v->pos_regid >> 2); 98 if (v->frag_coord) 99 debug_assert(v->info.max_reg >= 0); /* hard coded r0.x */ 100 if (v->frag_face) 101 debug_assert(v->info.max_half_reg >= 0); /* hr0.x */ 102 } 103 } 104 105 /* wrapper for ir3_assemble() which does some info fixup based on 106 * shader state. Non-static since used by ir3_cmdline too. 107 */ 108 void * ir3_shader_assemble(struct ir3_shader_variant *v, uint32_t gpu_id) 109 { 110 void *bin; 111 112 bin = ir3_assemble(v->ir, &v->info, gpu_id); 113 if (!bin) 114 return NULL; 115 116 if (gpu_id >= 400) { 117 v->instrlen = v->info.sizedwords / (2 * 16); 118 } else { 119 v->instrlen = v->info.sizedwords / (2 * 4); 120 } 121 122 /* NOTE: if relative addressing is used, we set constlen in 123 * the compiler (to worst-case value) since we don't know in 124 * the assembler what the max addr reg value can be: 125 */ 126 v->constlen = MIN2(255, MAX2(v->constlen, v->info.max_const + 1)); 127 128 fixup_regfootprint(v); 129 130 return bin; 131 } 132 133 static void 134 assemble_variant(struct ir3_shader_variant *v) 135 { 136 struct ir3_compiler *compiler = v->shader->compiler; 137 uint32_t gpu_id = compiler->gpu_id; 138 uint32_t sz, *bin; 139 140 bin = ir3_shader_assemble(v, gpu_id); 141 sz = v->info.sizedwords * 4; 142 143 v->bo = fd_bo_new(compiler->dev, sz, 144 DRM_FREEDRENO_GEM_CACHE_WCOMBINE | 145 DRM_FREEDRENO_GEM_TYPE_KMEM); 146 147 memcpy(fd_bo_map(v->bo), bin, sz); 148 149 if (fd_mesa_debug & FD_DBG_DISASM) { 150 struct ir3_shader_key key = v->key; 151 DBG("disassemble: type=%d, k={bp=%u,cts=%u,hp=%u}", v->type, 152 key.binning_pass, key.color_two_side, key.half_precision); 153 ir3_shader_disasm(v, bin); 154 } 155 156 free(bin); 157 158 /* no need to keep the ir around beyond this point: */ 159 ir3_destroy(v->ir); 160 v->ir = NULL; 161 } 162 163 static void 164 dump_shader_info(struct ir3_shader_variant *v, struct pipe_debug_callback *debug) 165 { 166 if (!unlikely(fd_mesa_debug & FD_DBG_SHADERDB)) 167 return; 168 169 pipe_debug_message(debug, SHADER_INFO, "\n" 170 "SHADER-DB: %s prog %d/%d: %u instructions, %u dwords\n" 171 "SHADER-DB: %s prog %d/%d: %u half, %u full\n" 172 "SHADER-DB: %s prog %d/%d: %u const, %u constlen\n", 173 ir3_shader_stage(v->shader), 174 v->shader->id, v->id, 175 v->info.instrs_count, 176 v->info.sizedwords, 177 ir3_shader_stage(v->shader), 178 v->shader->id, v->id, 179 v->info.max_half_reg + 1, 180 v->info.max_reg + 1, 181 ir3_shader_stage(v->shader), 182 v->shader->id, v->id, 183 v->info.max_const + 1, 184 v->constlen); 185 } 186 187 static struct ir3_shader_variant * 188 create_variant(struct ir3_shader *shader, struct ir3_shader_key key) 189 { 190 struct ir3_shader_variant *v = CALLOC_STRUCT(ir3_shader_variant); 191 int ret; 192 193 if (!v) 194 return NULL; 195 196 v->id = ++shader->variant_count; 197 v->shader = shader; 198 v->key = key; 199 v->type = shader->type; 200 201 ret = ir3_compile_shader_nir(shader->compiler, v); 202 if (ret) { 203 debug_error("compile failed!"); 204 goto fail; 205 } 206 207 assemble_variant(v); 208 if (!v->bo) { 209 debug_error("assemble failed!"); 210 goto fail; 211 } 212 213 return v; 214 215 fail: 216 delete_variant(v); 217 return NULL; 218 } 219 220 struct ir3_shader_variant * 221 ir3_shader_variant(struct ir3_shader *shader, struct ir3_shader_key key, 222 struct pipe_debug_callback *debug) 223 { 224 struct ir3_shader_variant *v; 225 226 /* some shader key values only apply to vertex or frag shader, 227 * so normalize the key to avoid constructing multiple identical 228 * variants: 229 */ 230 switch (shader->type) { 231 case SHADER_FRAGMENT: 232 key.binning_pass = false; 233 if (key.has_per_samp) { 234 key.vsaturate_s = 0; 235 key.vsaturate_t = 0; 236 key.vsaturate_r = 0; 237 key.vastc_srgb = 0; 238 } 239 break; 240 case SHADER_VERTEX: 241 key.color_two_side = false; 242 key.half_precision = false; 243 key.rasterflat = false; 244 if (key.has_per_samp) { 245 key.fsaturate_s = 0; 246 key.fsaturate_t = 0; 247 key.fsaturate_r = 0; 248 key.fastc_srgb = 0; 249 } 250 break; 251 default: 252 /* TODO */ 253 break; 254 } 255 256 for (v = shader->variants; v; v = v->next) 257 if (ir3_shader_key_equal(&key, &v->key)) 258 return v; 259 260 /* compile new variant if it doesn't exist already: */ 261 v = create_variant(shader, key); 262 if (v) { 263 v->next = shader->variants; 264 shader->variants = v; 265 dump_shader_info(v, debug); 266 } 267 268 return v; 269 } 270 271 272 void 273 ir3_shader_destroy(struct ir3_shader *shader) 274 { 275 struct ir3_shader_variant *v, *t; 276 for (v = shader->variants; v; ) { 277 t = v; 278 v = v->next; 279 delete_variant(t); 280 } 281 ralloc_free(shader->nir); 282 free(shader); 283 } 284 285 struct ir3_shader * 286 ir3_shader_create(struct ir3_compiler *compiler, 287 const struct pipe_shader_state *cso, enum shader_t type, 288 struct pipe_debug_callback *debug) 289 { 290 struct ir3_shader *shader = CALLOC_STRUCT(ir3_shader); 291 shader->compiler = compiler; 292 shader->id = ++shader->compiler->shader_count; 293 shader->type = type; 294 295 nir_shader *nir; 296 if (cso->type == PIPE_SHADER_IR_NIR) { 297 /* we take ownership of the reference: */ 298 nir = cso->ir.nir; 299 300 NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 301 (nir_lower_io_options)0); 302 } else { 303 debug_assert(cso->type == PIPE_SHADER_IR_TGSI); 304 if (fd_mesa_debug & FD_DBG_DISASM) { 305 DBG("dump tgsi: type=%d", shader->type); 306 tgsi_dump(cso->tokens, 0); 307 } 308 nir = ir3_tgsi_to_nir(cso->tokens); 309 } 310 /* do first pass optimization, ignoring the key: */ 311 shader->nir = ir3_optimize_nir(shader, nir, NULL); 312 if (fd_mesa_debug & FD_DBG_DISASM) { 313 DBG("dump nir%d: type=%d", shader->id, shader->type); 314 nir_print_shader(shader->nir, stdout); 315 } 316 317 shader->stream_output = cso->stream_output; 318 if (fd_mesa_debug & FD_DBG_SHADERDB) { 319 /* if shader-db run, create a standard variant immediately 320 * (as otherwise nothing will trigger the shader to be 321 * actually compiled) 322 */ 323 static struct ir3_shader_key key; 324 memset(&key, 0, sizeof(key)); 325 ir3_shader_variant(shader, key, debug); 326 } 327 return shader; 328 } 329 330 /* a bit annoying that compute-shader and normal shader state objects 331 * aren't a bit more aligned. 332 */ 333 struct ir3_shader * 334 ir3_shader_create_compute(struct ir3_compiler *compiler, 335 const struct pipe_compute_state *cso, 336 struct pipe_debug_callback *debug) 337 { 338 struct ir3_shader *shader = CALLOC_STRUCT(ir3_shader); 339 340 shader->compiler = compiler; 341 shader->id = ++shader->compiler->shader_count; 342 shader->type = SHADER_COMPUTE; 343 344 nir_shader *nir; 345 if (cso->ir_type == PIPE_SHADER_IR_NIR) { 346 /* we take ownership of the reference: */ 347 nir = (nir_shader *)cso->prog; 348 349 NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 350 (nir_lower_io_options)0); 351 } else { 352 debug_assert(cso->ir_type == PIPE_SHADER_IR_TGSI); 353 if (fd_mesa_debug & FD_DBG_DISASM) { 354 DBG("dump tgsi: type=%d", shader->type); 355 tgsi_dump(cso->prog, 0); 356 } 357 nir = ir3_tgsi_to_nir(cso->prog); 358 } 359 360 /* do first pass optimization, ignoring the key: */ 361 shader->nir = ir3_optimize_nir(shader, nir, NULL); 362 if (fd_mesa_debug & FD_DBG_DISASM) { 363 DBG("dump nir%d: type=%d", shader->id, shader->type); 364 nir_print_shader(shader->nir, stdout); 365 } 366 367 return shader; 368 } 369 370 static void dump_reg(const char *name, uint32_t r) 371 { 372 if (r != regid(63,0)) 373 debug_printf("; %s: r%d.%c\n", name, r >> 2, "xyzw"[r & 0x3]); 374 } 375 376 static void dump_output(struct ir3_shader_variant *so, 377 unsigned slot, const char *name) 378 { 379 uint32_t regid; 380 regid = ir3_find_output_regid(so, slot); 381 dump_reg(name, regid); 382 } 383 384 void 385 ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin) 386 { 387 struct ir3 *ir = so->ir; 388 struct ir3_register *reg; 389 const char *type = ir3_shader_stage(so->shader); 390 uint8_t regid; 391 unsigned i; 392 393 for (i = 0; i < ir->ninputs; i++) { 394 if (!ir->inputs[i]) { 395 debug_printf("; in%d unused\n", i); 396 continue; 397 } 398 reg = ir->inputs[i]->regs[0]; 399 regid = reg->num; 400 debug_printf("@in(%sr%d.%c)\tin%d\n", 401 (reg->flags & IR3_REG_HALF) ? "h" : "", 402 (regid >> 2), "xyzw"[regid & 0x3], i); 403 } 404 405 for (i = 0; i < ir->noutputs; i++) { 406 if (!ir->outputs[i]) { 407 debug_printf("; out%d unused\n", i); 408 continue; 409 } 410 /* kill shows up as a virtual output.. skip it! */ 411 if (is_kill(ir->outputs[i])) 412 continue; 413 reg = ir->outputs[i]->regs[0]; 414 regid = reg->num; 415 debug_printf("@out(%sr%d.%c)\tout%d\n", 416 (reg->flags & IR3_REG_HALF) ? "h" : "", 417 (regid >> 2), "xyzw"[regid & 0x3], i); 418 } 419 420 for (i = 0; i < so->immediates_count; i++) { 421 debug_printf("@const(c%d.x)\t", so->constbase.immediate + i); 422 debug_printf("0x%08x, 0x%08x, 0x%08x, 0x%08x\n", 423 so->immediates[i].val[0], 424 so->immediates[i].val[1], 425 so->immediates[i].val[2], 426 so->immediates[i].val[3]); 427 } 428 429 disasm_a3xx(bin, so->info.sizedwords, 0, so->type); 430 431 switch (so->type) { 432 case SHADER_VERTEX: 433 debug_printf("; %s: outputs:", type); 434 for (i = 0; i < so->outputs_count; i++) { 435 uint8_t regid = so->outputs[i].regid; 436 debug_printf(" r%d.%c (%s)", 437 (regid >> 2), "xyzw"[regid & 0x3], 438 gl_varying_slot_name(so->outputs[i].slot)); 439 } 440 debug_printf("\n"); 441 debug_printf("; %s: inputs:", type); 442 for (i = 0; i < so->inputs_count; i++) { 443 uint8_t regid = so->inputs[i].regid; 444 debug_printf(" r%d.%c (cm=%x,il=%u,b=%u)", 445 (regid >> 2), "xyzw"[regid & 0x3], 446 so->inputs[i].compmask, 447 so->inputs[i].inloc, 448 so->inputs[i].bary); 449 } 450 debug_printf("\n"); 451 break; 452 case SHADER_FRAGMENT: 453 debug_printf("; %s: outputs:", type); 454 for (i = 0; i < so->outputs_count; i++) { 455 uint8_t regid = so->outputs[i].regid; 456 debug_printf(" r%d.%c (%s)", 457 (regid >> 2), "xyzw"[regid & 0x3], 458 gl_frag_result_name(so->outputs[i].slot)); 459 } 460 debug_printf("\n"); 461 debug_printf("; %s: inputs:", type); 462 for (i = 0; i < so->inputs_count; i++) { 463 uint8_t regid = so->inputs[i].regid; 464 debug_printf(" r%d.%c (%s,cm=%x,il=%u,b=%u)", 465 (regid >> 2), "xyzw"[regid & 0x3], 466 gl_varying_slot_name(so->inputs[i].slot), 467 so->inputs[i].compmask, 468 so->inputs[i].inloc, 469 so->inputs[i].bary); 470 } 471 debug_printf("\n"); 472 break; 473 default: 474 /* TODO */ 475 break; 476 } 477 478 /* print generic shader info: */ 479 debug_printf("; %s prog %d/%d: %u instructions, %d half, %d full\n", 480 type, so->shader->id, so->id, 481 so->info.instrs_count, 482 so->info.max_half_reg + 1, 483 so->info.max_reg + 1); 484 485 debug_printf("; %d const, %u constlen\n", 486 so->info.max_const + 1, 487 so->constlen); 488 489 /* print shader type specific info: */ 490 switch (so->type) { 491 case SHADER_VERTEX: 492 dump_output(so, VARYING_SLOT_POS, "pos"); 493 dump_output(so, VARYING_SLOT_PSIZ, "psize"); 494 break; 495 case SHADER_FRAGMENT: 496 dump_reg("pos (bary)", so->pos_regid); 497 dump_output(so, FRAG_RESULT_DEPTH, "posz"); 498 if (so->color0_mrt) { 499 dump_output(so, FRAG_RESULT_COLOR, "color"); 500 } else { 501 dump_output(so, FRAG_RESULT_DATA0, "data0"); 502 dump_output(so, FRAG_RESULT_DATA1, "data1"); 503 dump_output(so, FRAG_RESULT_DATA2, "data2"); 504 dump_output(so, FRAG_RESULT_DATA3, "data3"); 505 dump_output(so, FRAG_RESULT_DATA4, "data4"); 506 dump_output(so, FRAG_RESULT_DATA5, "data5"); 507 dump_output(so, FRAG_RESULT_DATA6, "data6"); 508 dump_output(so, FRAG_RESULT_DATA7, "data7"); 509 } 510 /* these two are hard-coded since we don't know how to 511 * program them to anything but all 0's... 512 */ 513 if (so->frag_coord) 514 debug_printf("; fragcoord: r0.x\n"); 515 if (so->frag_face) 516 debug_printf("; fragface: hr0.x\n"); 517 break; 518 default: 519 /* TODO */ 520 break; 521 } 522 523 debug_printf("\n"); 524 } 525 526 uint64_t 527 ir3_shader_outputs(const struct ir3_shader *so) 528 { 529 return so->nir->info.outputs_written; 530 } 531 532 /* This has to reach into the fd_context a bit more than the rest of 533 * ir3, but it needs to be aligned with the compiler, so both agree 534 * on which const regs hold what. And the logic is identical between 535 * a3xx/a4xx, the only difference is small details in the actual 536 * CP_LOAD_STATE packets (which is handled inside the generation 537 * specific ctx->emit_const(_bo)() fxns) 538 */ 539 540 #include "freedreno_resource.h" 541 542 static void 543 emit_user_consts(struct fd_context *ctx, const struct ir3_shader_variant *v, 544 struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf) 545 { 546 const unsigned index = 0; /* user consts are index 0 */ 547 /* TODO save/restore dirty_mask for binning pass instead: */ 548 uint32_t dirty_mask = constbuf->enabled_mask; 549 550 if (dirty_mask & (1 << index)) { 551 struct pipe_constant_buffer *cb = &constbuf->cb[index]; 552 unsigned size = align(cb->buffer_size, 4) / 4; /* size in dwords */ 553 554 /* in particular, with binning shader we may end up with 555 * unused consts, ie. we could end up w/ constlen that is 556 * smaller than first_driver_param. In that case truncate 557 * the user consts early to avoid HLSQ lockup caused by 558 * writing too many consts 559 */ 560 uint32_t max_const = MIN2(v->num_uniforms, v->constlen); 561 562 // I expect that size should be a multiple of vec4's: 563 assert(size == align(size, 4)); 564 565 /* and even if the start of the const buffer is before 566 * first_immediate, the end may not be: 567 */ 568 size = MIN2(size, 4 * max_const); 569 570 if (size > 0) { 571 fd_wfi(ctx->batch, ring); 572 ctx->emit_const(ring, v->type, 0, 573 cb->buffer_offset, size, 574 cb->user_buffer, cb->buffer); 575 constbuf->dirty_mask &= ~(1 << index); 576 } 577 } 578 } 579 580 static void 581 emit_ubos(struct fd_context *ctx, const struct ir3_shader_variant *v, 582 struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf) 583 { 584 uint32_t offset = v->constbase.ubo; 585 if (v->constlen > offset) { 586 uint32_t params = v->num_ubos; 587 uint32_t offsets[params]; 588 struct pipe_resource *prscs[params]; 589 590 for (uint32_t i = 0; i < params; i++) { 591 const uint32_t index = i + 1; /* UBOs start at index 1 */ 592 struct pipe_constant_buffer *cb = &constbuf->cb[index]; 593 assert(!cb->user_buffer); 594 595 if ((constbuf->enabled_mask & (1 << index)) && cb->buffer) { 596 offsets[i] = cb->buffer_offset; 597 prscs[i] = cb->buffer; 598 } else { 599 offsets[i] = 0; 600 prscs[i] = NULL; 601 } 602 } 603 604 fd_wfi(ctx->batch, ring); 605 ctx->emit_const_bo(ring, v->type, false, offset * 4, params, prscs, offsets); 606 } 607 } 608 609 static void 610 emit_ssbo_sizes(struct fd_context *ctx, const struct ir3_shader_variant *v, 611 struct fd_ringbuffer *ring, struct fd_shaderbuf_stateobj *sb) 612 { 613 uint32_t offset = v->constbase.ssbo_sizes; 614 if (v->constlen > offset) { 615 uint32_t sizes[align(v->const_layout.ssbo_size.count, 4)]; 616 unsigned mask = v->const_layout.ssbo_size.mask; 617 618 while (mask) { 619 unsigned index = u_bit_scan(&mask); 620 unsigned off = v->const_layout.ssbo_size.off[index]; 621 sizes[off] = sb->sb[index].buffer_size; 622 } 623 624 fd_wfi(ctx->batch, ring); 625 ctx->emit_const(ring, v->type, offset * 4, 626 0, ARRAY_SIZE(sizes), sizes, NULL); 627 } 628 } 629 630 static void 631 emit_image_dims(struct fd_context *ctx, const struct ir3_shader_variant *v, 632 struct fd_ringbuffer *ring, struct fd_shaderimg_stateobj *si) 633 { 634 uint32_t offset = v->constbase.image_dims; 635 if (v->constlen > offset) { 636 uint32_t dims[align(v->const_layout.image_dims.count, 4)]; 637 unsigned mask = v->const_layout.image_dims.mask; 638 639 while (mask) { 640 struct pipe_image_view *img; 641 struct fd_resource *rsc; 642 unsigned index = u_bit_scan(&mask); 643 unsigned off = v->const_layout.image_dims.off[index]; 644 645 img = &si->si[index]; 646 rsc = fd_resource(img->resource); 647 648 dims[off + 0] = rsc->cpp; 649 if (img->resource->target != PIPE_BUFFER) { 650 unsigned lvl = img->u.tex.level; 651 dims[off + 1] = rsc->slices[lvl].pitch * rsc->cpp; 652 dims[off + 2] = rsc->slices[lvl].size0; 653 } 654 } 655 656 fd_wfi(ctx->batch, ring); 657 ctx->emit_const(ring, v->type, offset * 4, 658 0, ARRAY_SIZE(dims), dims, NULL); 659 } 660 } 661 662 static void 663 emit_immediates(struct fd_context *ctx, const struct ir3_shader_variant *v, 664 struct fd_ringbuffer *ring) 665 { 666 int size = v->immediates_count; 667 uint32_t base = v->constbase.immediate; 668 669 /* truncate size to avoid writing constants that shader 670 * does not use: 671 */ 672 size = MIN2(size + base, v->constlen) - base; 673 674 /* convert out of vec4: */ 675 base *= 4; 676 size *= 4; 677 678 if (size > 0) { 679 fd_wfi(ctx->batch, ring); 680 ctx->emit_const(ring, v->type, base, 681 0, size, v->immediates[0].val, NULL); 682 } 683 } 684 685 /* emit stream-out buffers: */ 686 static void 687 emit_tfbos(struct fd_context *ctx, const struct ir3_shader_variant *v, 688 struct fd_ringbuffer *ring) 689 { 690 /* streamout addresses after driver-params: */ 691 uint32_t offset = v->constbase.tfbo; 692 if (v->constlen > offset) { 693 struct fd_streamout_stateobj *so = &ctx->streamout; 694 struct pipe_stream_output_info *info = &v->shader->stream_output; 695 uint32_t params = 4; 696 uint32_t offsets[params]; 697 struct pipe_resource *prscs[params]; 698 699 for (uint32_t i = 0; i < params; i++) { 700 struct pipe_stream_output_target *target = so->targets[i]; 701 702 if (target) { 703 offsets[i] = (so->offsets[i] * info->stride[i] * 4) + 704 target->buffer_offset; 705 prscs[i] = target->buffer; 706 } else { 707 offsets[i] = 0; 708 prscs[i] = NULL; 709 } 710 } 711 712 fd_wfi(ctx->batch, ring); 713 ctx->emit_const_bo(ring, v->type, true, offset * 4, params, prscs, offsets); 714 } 715 } 716 717 static uint32_t 718 max_tf_vtx(struct fd_context *ctx, const struct ir3_shader_variant *v) 719 { 720 struct fd_streamout_stateobj *so = &ctx->streamout; 721 struct pipe_stream_output_info *info = &v->shader->stream_output; 722 uint32_t maxvtxcnt = 0x7fffffff; 723 724 if (ctx->screen->gpu_id >= 500) 725 return 0; 726 if (v->key.binning_pass) 727 return 0; 728 if (v->shader->stream_output.num_outputs == 0) 729 return 0; 730 if (so->num_targets == 0) 731 return 0; 732 733 /* offset to write to is: 734 * 735 * total_vtxcnt = vtxcnt + offsets[i] 736 * offset = total_vtxcnt * stride[i] 737 * 738 * offset = vtxcnt * stride[i] ; calculated in shader 739 * + offsets[i] * stride[i] ; calculated at emit_tfbos() 740 * 741 * assuming for each vtx, each target buffer will have data written 742 * up to 'offset + stride[i]', that leaves maxvtxcnt as: 743 * 744 * buffer_size = (maxvtxcnt * stride[i]) + stride[i] 745 * maxvtxcnt = (buffer_size - stride[i]) / stride[i] 746 * 747 * but shader is actually doing a less-than (rather than less-than- 748 * equal) check, so we can drop the -stride[i]. 749 * 750 * TODO is assumption about `offset + stride[i]` legit? 751 */ 752 for (unsigned i = 0; i < so->num_targets; i++) { 753 struct pipe_stream_output_target *target = so->targets[i]; 754 unsigned stride = info->stride[i] * 4; /* convert dwords->bytes */ 755 if (target) { 756 uint32_t max = target->buffer_size / stride; 757 maxvtxcnt = MIN2(maxvtxcnt, max); 758 } 759 } 760 761 return maxvtxcnt; 762 } 763 764 static void 765 emit_common_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring, 766 struct fd_context *ctx, enum pipe_shader_type t) 767 { 768 enum fd_dirty_shader_state dirty = ctx->dirty_shader[t]; 769 770 if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_CONST)) { 771 struct fd_constbuf_stateobj *constbuf; 772 bool shader_dirty; 773 774 constbuf = &ctx->constbuf[t]; 775 shader_dirty = !!(dirty & FD_DIRTY_SHADER_PROG); 776 777 emit_user_consts(ctx, v, ring, constbuf); 778 emit_ubos(ctx, v, ring, constbuf); 779 if (shader_dirty) 780 emit_immediates(ctx, v, ring); 781 } 782 783 if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_SSBO)) { 784 struct fd_shaderbuf_stateobj *sb = &ctx->shaderbuf[t]; 785 emit_ssbo_sizes(ctx, v, ring, sb); 786 } 787 788 if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_IMAGE)) { 789 struct fd_shaderimg_stateobj *si = &ctx->shaderimg[t]; 790 emit_image_dims(ctx, v, ring, si); 791 } 792 } 793 794 void 795 ir3_emit_vs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring, 796 struct fd_context *ctx, const struct pipe_draw_info *info) 797 { 798 debug_assert(v->type == SHADER_VERTEX); 799 800 emit_common_consts(v, ring, ctx, PIPE_SHADER_VERTEX); 801 802 /* emit driver params every time: */ 803 /* TODO skip emit if shader doesn't use driver params to avoid WFI.. */ 804 if (info) { 805 uint32_t offset = v->constbase.driver_param; 806 if (v->constlen > offset) { 807 uint32_t vertex_params[IR3_DP_VS_COUNT] = { 808 [IR3_DP_VTXID_BASE] = info->index_size ? 809 info->index_bias : info->start, 810 [IR3_DP_VTXCNT_MAX] = max_tf_vtx(ctx, v), 811 }; 812 /* if no user-clip-planes, we don't need to emit the 813 * entire thing: 814 */ 815 uint32_t vertex_params_size = 4; 816 817 if (v->key.ucp_enables) { 818 struct pipe_clip_state *ucp = &ctx->ucp; 819 unsigned pos = IR3_DP_UCP0_X; 820 for (unsigned i = 0; pos <= IR3_DP_UCP7_W; i++) { 821 for (unsigned j = 0; j < 4; j++) { 822 vertex_params[pos] = fui(ucp->ucp[i][j]); 823 pos++; 824 } 825 } 826 vertex_params_size = ARRAY_SIZE(vertex_params); 827 } 828 829 fd_wfi(ctx->batch, ring); 830 831 bool needs_vtxid_base = 832 ir3_find_sysval_regid(v, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) != regid(63, 0); 833 834 /* for indirect draw, we need to copy VTXID_BASE from 835 * indirect-draw parameters buffer.. which is annoying 836 * and means we can't easily emit these consts in cmd 837 * stream so need to copy them to bo. 838 */ 839 if (info->indirect && needs_vtxid_base) { 840 struct pipe_draw_indirect_info *indirect = info->indirect; 841 struct pipe_resource *vertex_params_rsc = 842 pipe_buffer_create(&ctx->screen->base, 843 PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_STREAM, 844 vertex_params_size * 4); 845 unsigned src_off = info->indirect->offset;; 846 void *ptr; 847 848 ptr = fd_bo_map(fd_resource(vertex_params_rsc)->bo); 849 memcpy(ptr, vertex_params, vertex_params_size * 4); 850 851 if (info->index_size) { 852 /* indexed draw, index_bias is 4th field: */ 853 src_off += 3 * 4; 854 } else { 855 /* non-indexed draw, start is 3rd field: */ 856 src_off += 2 * 4; 857 } 858 859 /* copy index_bias or start from draw params: */ 860 ctx->mem_to_mem(ring, vertex_params_rsc, 0, 861 indirect->buffer, src_off, 1); 862 863 ctx->emit_const(ring, SHADER_VERTEX, offset * 4, 0, 864 vertex_params_size, NULL, vertex_params_rsc); 865 866 pipe_resource_reference(&vertex_params_rsc, NULL); 867 } else { 868 ctx->emit_const(ring, SHADER_VERTEX, offset * 4, 0, 869 vertex_params_size, vertex_params, NULL); 870 } 871 872 /* if needed, emit stream-out buffer addresses: */ 873 if (vertex_params[IR3_DP_VTXCNT_MAX] > 0) { 874 emit_tfbos(ctx, v, ring); 875 } 876 } 877 } 878 } 879 880 void 881 ir3_emit_fs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring, 882 struct fd_context *ctx) 883 { 884 debug_assert(v->type == SHADER_FRAGMENT); 885 886 emit_common_consts(v, ring, ctx, PIPE_SHADER_FRAGMENT); 887 } 888 889 /* emit compute-shader consts: */ 890 void 891 ir3_emit_cs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring, 892 struct fd_context *ctx, const struct pipe_grid_info *info) 893 { 894 debug_assert(v->type == SHADER_COMPUTE); 895 896 emit_common_consts(v, ring, ctx, PIPE_SHADER_COMPUTE); 897 898 /* emit compute-shader driver-params: */ 899 uint32_t offset = v->constbase.driver_param; 900 if (v->constlen > offset) { 901 fd_wfi(ctx->batch, ring); 902 903 if (info->indirect) { 904 struct pipe_resource *indirect = NULL; 905 unsigned indirect_offset; 906 907 /* This is a bit awkward, but CP_LOAD_STATE.EXT_SRC_ADDR needs 908 * to be aligned more strongly than 4 bytes. So in this case 909 * we need a temporary buffer to copy NumWorkGroups.xyz to. 910 * 911 * TODO if previous compute job is writing to info->indirect, 912 * we might need a WFI.. but since we currently flush for each 913 * compute job, we are probably ok for now. 914 */ 915 if (info->indirect_offset & 0xf) { 916 indirect = pipe_buffer_create(&ctx->screen->base, 917 PIPE_BIND_COMMAND_ARGS_BUFFER, PIPE_USAGE_STREAM, 918 0x1000); 919 indirect_offset = 0; 920 921 ctx->mem_to_mem(ring, indirect, 0, info->indirect, 922 info->indirect_offset, 3); 923 } else { 924 pipe_resource_reference(&indirect, info->indirect); 925 indirect_offset = info->indirect_offset; 926 } 927 928 ctx->emit_const(ring, SHADER_COMPUTE, offset * 4, 929 indirect_offset, 4, NULL, indirect); 930 931 pipe_resource_reference(&indirect, NULL); 932 } else { 933 uint32_t compute_params[IR3_DP_CS_COUNT] = { 934 [IR3_DP_NUM_WORK_GROUPS_X] = info->grid[0], 935 [IR3_DP_NUM_WORK_GROUPS_Y] = info->grid[1], 936 [IR3_DP_NUM_WORK_GROUPS_Z] = info->grid[2], 937 /* do we need work-group-size? */ 938 }; 939 940 ctx->emit_const(ring, SHADER_COMPUTE, offset * 4, 0, 941 ARRAY_SIZE(compute_params), compute_params, NULL); 942 } 943 } 944 } 945