1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */ 2 3 /* 4 * Copyright (C) 2014 Rob Clark <robclark (at) freedesktop.org> 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice (including the next 14 * paragraph) shall be included in all copies or substantial portions of the 15 * Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 * SOFTWARE. 
24 * 25 * Authors: 26 * Rob Clark <robclark (at) freedesktop.org> 27 */ 28 29 #include "pipe/p_state.h" 30 #include "util/u_string.h" 31 #include "util/u_memory.h" 32 #include "util/u_inlines.h" 33 #include "util/u_format.h" 34 #include "tgsi/tgsi_dump.h" 35 #include "tgsi/tgsi_parse.h" 36 37 #include "freedreno_context.h" 38 #include "freedreno_util.h" 39 40 #include "ir3_shader.h" 41 #include "ir3_compiler.h" 42 #include "ir3_nir.h" 43 44 static void 45 delete_variant(struct ir3_shader_variant *v) 46 { 47 if (v->ir) 48 ir3_destroy(v->ir); 49 if (v->bo) 50 fd_bo_del(v->bo); 51 free(v); 52 } 53 54 /* for vertex shader, the inputs are loaded into registers before the shader 55 * is executed, so max_regs from the shader instructions might not properly 56 * reflect the # of registers actually used, especially in case passthrough 57 * varyings. 58 * 59 * Likewise, for fragment shader, we can have some regs which are passed 60 * input values but never touched by the resulting shader (ie. as result 61 * of dead code elimination or simply because we don't know how to turn 62 * the reg off. 63 */ 64 static void 65 fixup_regfootprint(struct ir3_shader_variant *v) 66 { 67 if (v->type == SHADER_VERTEX) { 68 unsigned i; 69 for (i = 0; i < v->inputs_count; i++) { 70 /* skip frag inputs fetch via bary.f since their reg's are 71 * not written by gpu before shader starts (and in fact the 72 * regid's might not even be valid) 73 */ 74 if (v->inputs[i].bary) 75 continue; 76 77 if (v->inputs[i].compmask) { 78 int32_t regid = (v->inputs[i].regid + 3) >> 2; 79 v->info.max_reg = MAX2(v->info.max_reg, regid); 80 } 81 } 82 for (i = 0; i < v->outputs_count; i++) { 83 int32_t regid = (v->outputs[i].regid + 3) >> 2; 84 v->info.max_reg = MAX2(v->info.max_reg, regid); 85 } 86 } else if (v->type == SHADER_FRAGMENT) { 87 /* NOTE: not sure how to turn pos_regid off.. 
but this could 88 * be, for example, r1.x while max reg used by the shader is 89 * r0.*, in which case we need to fixup the reg footprint: 90 */ 91 v->info.max_reg = MAX2(v->info.max_reg, v->pos_regid >> 2); 92 if (v->frag_coord) 93 debug_assert(v->info.max_reg >= 0); /* hard coded r0.x */ 94 if (v->frag_face) 95 debug_assert(v->info.max_half_reg >= 0); /* hr0.x */ 96 } 97 } 98 99 /* wrapper for ir3_assemble() which does some info fixup based on 100 * shader state. Non-static since used by ir3_cmdline too. 101 */ 102 void * ir3_shader_assemble(struct ir3_shader_variant *v, uint32_t gpu_id) 103 { 104 void *bin; 105 106 bin = ir3_assemble(v->ir, &v->info, gpu_id); 107 if (!bin) 108 return NULL; 109 110 if (gpu_id >= 400) { 111 v->instrlen = v->info.sizedwords / (2 * 16); 112 } else { 113 v->instrlen = v->info.sizedwords / (2 * 4); 114 } 115 116 /* NOTE: if relative addressing is used, we set constlen in 117 * the compiler (to worst-case value) since we don't know in 118 * the assembler what the max addr reg value can be: 119 */ 120 v->constlen = MIN2(255, MAX2(v->constlen, v->info.max_const + 1)); 121 122 fixup_regfootprint(v); 123 124 return bin; 125 } 126 127 static void 128 assemble_variant(struct ir3_shader_variant *v) 129 { 130 struct ir3_compiler *compiler = v->shader->compiler; 131 uint32_t gpu_id = compiler->gpu_id; 132 uint32_t sz, *bin; 133 134 bin = ir3_shader_assemble(v, gpu_id); 135 sz = v->info.sizedwords * 4; 136 137 v->bo = fd_bo_new(compiler->dev, sz, 138 DRM_FREEDRENO_GEM_CACHE_WCOMBINE | 139 DRM_FREEDRENO_GEM_TYPE_KMEM); 140 141 memcpy(fd_bo_map(v->bo), bin, sz); 142 143 if (fd_mesa_debug & FD_DBG_DISASM) { 144 struct ir3_shader_key key = v->key; 145 DBG("disassemble: type=%d, k={bp=%u,cts=%u,hp=%u}", v->type, 146 key.binning_pass, key.color_two_side, key.half_precision); 147 ir3_shader_disasm(v, bin); 148 } 149 150 free(bin); 151 152 /* no need to keep the ir around beyond this point: */ 153 ir3_destroy(v->ir); 154 v->ir = NULL; 155 } 156 157 
static void 158 dump_shader_info(struct ir3_shader_variant *v, struct pipe_debug_callback *debug) 159 { 160 if (!unlikely(fd_mesa_debug & FD_DBG_SHADERDB)) 161 return; 162 163 pipe_debug_message(debug, SHADER_INFO, "\n" 164 "SHADER-DB: %s prog %d/%d: %u instructions, %u dwords\n" 165 "SHADER-DB: %s prog %d/%d: %u half, %u full\n" 166 "SHADER-DB: %s prog %d/%d: %u const, %u constlen\n", 167 ir3_shader_stage(v->shader), 168 v->shader->id, v->id, 169 v->info.instrs_count, 170 v->info.sizedwords, 171 ir3_shader_stage(v->shader), 172 v->shader->id, v->id, 173 v->info.max_half_reg + 1, 174 v->info.max_reg + 1, 175 ir3_shader_stage(v->shader), 176 v->shader->id, v->id, 177 v->info.max_const + 1, 178 v->constlen); 179 } 180 181 static struct ir3_shader_variant * 182 create_variant(struct ir3_shader *shader, struct ir3_shader_key key) 183 { 184 struct ir3_shader_variant *v = CALLOC_STRUCT(ir3_shader_variant); 185 int ret; 186 187 if (!v) 188 return NULL; 189 190 v->id = ++shader->variant_count; 191 v->shader = shader; 192 v->key = key; 193 v->type = shader->type; 194 195 ret = ir3_compile_shader_nir(shader->compiler, v); 196 if (ret) { 197 debug_error("compile failed!"); 198 goto fail; 199 } 200 201 assemble_variant(v); 202 if (!v->bo) { 203 debug_error("assemble failed!"); 204 goto fail; 205 } 206 207 return v; 208 209 fail: 210 delete_variant(v); 211 return NULL; 212 } 213 214 struct ir3_shader_variant * 215 ir3_shader_variant(struct ir3_shader *shader, struct ir3_shader_key key, 216 struct pipe_debug_callback *debug) 217 { 218 struct ir3_shader_variant *v; 219 220 /* some shader key values only apply to vertex or frag shader, 221 * so normalize the key to avoid constructing multiple identical 222 * variants: 223 */ 224 switch (shader->type) { 225 case SHADER_FRAGMENT: 226 case SHADER_COMPUTE: 227 key.binning_pass = false; 228 if (key.has_per_samp) { 229 key.vsaturate_s = 0; 230 key.vsaturate_t = 0; 231 key.vsaturate_r = 0; 232 key.vastc_srgb = 0; 233 } 234 break; 235 
case SHADER_VERTEX: 236 key.color_two_side = false; 237 key.half_precision = false; 238 key.rasterflat = false; 239 if (key.has_per_samp) { 240 key.fsaturate_s = 0; 241 key.fsaturate_t = 0; 242 key.fsaturate_r = 0; 243 key.fastc_srgb = 0; 244 } 245 break; 246 } 247 248 for (v = shader->variants; v; v = v->next) 249 if (ir3_shader_key_equal(&key, &v->key)) 250 return v; 251 252 /* compile new variant if it doesn't exist already: */ 253 v = create_variant(shader, key); 254 if (v) { 255 v->next = shader->variants; 256 shader->variants = v; 257 dump_shader_info(v, debug); 258 } 259 260 return v; 261 } 262 263 264 void 265 ir3_shader_destroy(struct ir3_shader *shader) 266 { 267 struct ir3_shader_variant *v, *t; 268 for (v = shader->variants; v; ) { 269 t = v; 270 v = v->next; 271 delete_variant(t); 272 } 273 ralloc_free(shader->nir); 274 free(shader); 275 } 276 277 struct ir3_shader * 278 ir3_shader_create(struct ir3_compiler *compiler, 279 const struct pipe_shader_state *cso, enum shader_t type, 280 struct pipe_debug_callback *debug) 281 { 282 struct ir3_shader *shader = CALLOC_STRUCT(ir3_shader); 283 shader->compiler = compiler; 284 shader->id = ++shader->compiler->shader_count; 285 shader->type = type; 286 287 nir_shader *nir; 288 if (cso->type == PIPE_SHADER_IR_NIR) { 289 /* we take ownership of the reference: */ 290 nir = cso->ir.nir; 291 } else { 292 if (fd_mesa_debug & FD_DBG_DISASM) { 293 DBG("dump tgsi: type=%d", shader->type); 294 tgsi_dump(cso->tokens, 0); 295 } 296 nir = ir3_tgsi_to_nir(cso->tokens); 297 } 298 /* do first pass optimization, ignoring the key: */ 299 shader->nir = ir3_optimize_nir(shader, nir, NULL); 300 if (fd_mesa_debug & FD_DBG_DISASM) { 301 DBG("dump nir%d: type=%d", shader->id, shader->type); 302 nir_print_shader(shader->nir, stdout); 303 } 304 305 shader->stream_output = cso->stream_output; 306 if (fd_mesa_debug & FD_DBG_SHADERDB) { 307 /* if shader-db run, create a standard variant immediately 308 * (as otherwise nothing will trigger 
the shader to be 309 * actually compiled) 310 */ 311 static struct ir3_shader_key key; 312 memset(&key, 0, sizeof(key)); 313 ir3_shader_variant(shader, key, debug); 314 } 315 return shader; 316 } 317 318 static void dump_reg(const char *name, uint32_t r) 319 { 320 if (r != regid(63,0)) 321 debug_printf("; %s: r%d.%c\n", name, r >> 2, "xyzw"[r & 0x3]); 322 } 323 324 static void dump_output(struct ir3_shader_variant *so, 325 unsigned slot, const char *name) 326 { 327 uint32_t regid; 328 regid = ir3_find_output_regid(so, slot); 329 dump_reg(name, regid); 330 } 331 332 void 333 ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin) 334 { 335 struct ir3 *ir = so->ir; 336 struct ir3_register *reg; 337 const char *type = ir3_shader_stage(so->shader); 338 uint8_t regid; 339 unsigned i; 340 341 for (i = 0; i < ir->ninputs; i++) { 342 if (!ir->inputs[i]) { 343 debug_printf("; in%d unused\n", i); 344 continue; 345 } 346 reg = ir->inputs[i]->regs[0]; 347 regid = reg->num; 348 debug_printf("@in(%sr%d.%c)\tin%d\n", 349 (reg->flags & IR3_REG_HALF) ? "h" : "", 350 (regid >> 2), "xyzw"[regid & 0x3], i); 351 } 352 353 for (i = 0; i < ir->noutputs; i++) { 354 if (!ir->outputs[i]) { 355 debug_printf("; out%d unused\n", i); 356 continue; 357 } 358 /* kill shows up as a virtual output.. skip it! */ 359 if (is_kill(ir->outputs[i])) 360 continue; 361 reg = ir->outputs[i]->regs[0]; 362 regid = reg->num; 363 debug_printf("@out(%sr%d.%c)\tout%d\n", 364 (reg->flags & IR3_REG_HALF) ? 
"h" : "", 365 (regid >> 2), "xyzw"[regid & 0x3], i); 366 } 367 368 for (i = 0; i < so->immediates_count; i++) { 369 debug_printf("@const(c%d.x)\t", so->constbase.immediate + i); 370 debug_printf("0x%08x, 0x%08x, 0x%08x, 0x%08x\n", 371 so->immediates[i].val[0], 372 so->immediates[i].val[1], 373 so->immediates[i].val[2], 374 so->immediates[i].val[3]); 375 } 376 377 disasm_a3xx(bin, so->info.sizedwords, 0, so->type); 378 379 switch (so->type) { 380 case SHADER_VERTEX: 381 debug_printf("; %s: outputs:", type); 382 for (i = 0; i < so->outputs_count; i++) { 383 uint8_t regid = so->outputs[i].regid; 384 debug_printf(" r%d.%c (%s)", 385 (regid >> 2), "xyzw"[regid & 0x3], 386 gl_varying_slot_name(so->outputs[i].slot)); 387 } 388 debug_printf("\n"); 389 debug_printf("; %s: inputs:", type); 390 for (i = 0; i < so->inputs_count; i++) { 391 uint8_t regid = so->inputs[i].regid; 392 debug_printf(" r%d.%c (cm=%x,il=%u,b=%u)", 393 (regid >> 2), "xyzw"[regid & 0x3], 394 so->inputs[i].compmask, 395 so->inputs[i].inloc, 396 so->inputs[i].bary); 397 } 398 debug_printf("\n"); 399 break; 400 case SHADER_FRAGMENT: 401 debug_printf("; %s: outputs:", type); 402 for (i = 0; i < so->outputs_count; i++) { 403 uint8_t regid = so->outputs[i].regid; 404 debug_printf(" r%d.%c (%s)", 405 (regid >> 2), "xyzw"[regid & 0x3], 406 gl_frag_result_name(so->outputs[i].slot)); 407 } 408 debug_printf("\n"); 409 debug_printf("; %s: inputs:", type); 410 for (i = 0; i < so->inputs_count; i++) { 411 uint8_t regid = so->inputs[i].regid; 412 debug_printf(" r%d.%c (%s,cm=%x,il=%u,b=%u)", 413 (regid >> 2), "xyzw"[regid & 0x3], 414 gl_varying_slot_name(so->inputs[i].slot), 415 so->inputs[i].compmask, 416 so->inputs[i].inloc, 417 so->inputs[i].bary); 418 } 419 debug_printf("\n"); 420 break; 421 case SHADER_COMPUTE: 422 break; 423 } 424 425 /* print generic shader info: */ 426 debug_printf("; %s prog %d/%d: %u instructions, %d half, %d full\n", 427 type, so->shader->id, so->id, 428 so->info.instrs_count, 429 
so->info.max_half_reg + 1, 430 so->info.max_reg + 1); 431 432 debug_printf("; %d const, %u constlen\n", 433 so->info.max_const + 1, 434 so->constlen); 435 436 /* print shader type specific info: */ 437 switch (so->type) { 438 case SHADER_VERTEX: 439 dump_output(so, VARYING_SLOT_POS, "pos"); 440 dump_output(so, VARYING_SLOT_PSIZ, "psize"); 441 break; 442 case SHADER_FRAGMENT: 443 dump_reg("pos (bary)", so->pos_regid); 444 dump_output(so, FRAG_RESULT_DEPTH, "posz"); 445 if (so->color0_mrt) { 446 dump_output(so, FRAG_RESULT_COLOR, "color"); 447 } else { 448 dump_output(so, FRAG_RESULT_DATA0, "data0"); 449 dump_output(so, FRAG_RESULT_DATA1, "data1"); 450 dump_output(so, FRAG_RESULT_DATA2, "data2"); 451 dump_output(so, FRAG_RESULT_DATA3, "data3"); 452 dump_output(so, FRAG_RESULT_DATA4, "data4"); 453 dump_output(so, FRAG_RESULT_DATA5, "data5"); 454 dump_output(so, FRAG_RESULT_DATA6, "data6"); 455 dump_output(so, FRAG_RESULT_DATA7, "data7"); 456 } 457 /* these two are hard-coded since we don't know how to 458 * program them to anything but all 0's... 459 */ 460 if (so->frag_coord) 461 debug_printf("; fragcoord: r0.x\n"); 462 if (so->frag_face) 463 debug_printf("; fragface: hr0.x\n"); 464 break; 465 case SHADER_COMPUTE: 466 break; 467 } 468 469 debug_printf("\n"); 470 } 471 472 uint64_t 473 ir3_shader_outputs(const struct ir3_shader *so) 474 { 475 return so->nir->info->outputs_written; 476 } 477 478 /* This has to reach into the fd_context a bit more than the rest of 479 * ir3, but it needs to be aligned with the compiler, so both agree 480 * on which const regs hold what. 
 * And the logic is identical between
 * a3xx/a4xx, the only difference is small details in the actual
 * CP_LOAD_STATE packets (which is handled inside the generation
 * specific ctx->emit_const(_bo)() fxns)
 */

#include "freedreno_resource.h"

/* Emit the user (index 0) constant buffer, truncated to what the
 * variant's constlen actually consumes.
 */
static void
emit_user_consts(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
	const unsigned index = 0; /* user consts are index 0 */
	/* TODO save/restore dirty_mask for binning pass instead: */
	uint32_t dirty_mask = constbuf->enabled_mask;

	if (dirty_mask & (1 << index)) {
		struct pipe_constant_buffer *cb = &constbuf->cb[index];
		unsigned size = align(cb->buffer_size, 4) / 4; /* size in dwords */

		/* in particular, with binning shader we may end up with
		 * unused consts, ie. we could end up w/ constlen that is
		 * smaller than first_driver_param.  In that case truncate
		 * the user consts early to avoid HLSQ lockup caused by
		 * writing too many consts
		 */
		uint32_t max_const = MIN2(v->num_uniforms, v->constlen);

		/* I expect that size should be a multiple of vec4's: */
		assert(size == align(size, 4));

		/* and even if the start of the const buffer is before
		 * first_immediate, the end may not be:
		 */
		size = MIN2(size, 4 * max_const); /* max_const is in vec4 units */

		if (size > 0) {
			fd_wfi(ctx->batch, ring);
			ctx->emit_const(ring, v->type, 0,
					cb->buffer_offset, size,
					cb->user_buffer, cb->buffer);
			/* only the user-const slot is marked clean here; UBO
			 * slots keep their dirty state:
			 */
			constbuf->dirty_mask &= ~(1 << index);
		}
	}
}

/* Emit UBO base addresses (as bo + offset pairs) into the const regs
 * starting at v->constbase.ubo.  Unbound slots get NULL/0.
 */
static void
emit_ubos(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
	uint32_t offset = v->constbase.ubo;
	if (v->constlen > offset) {
		uint32_t params = v->num_ubos;
		/* NOTE(review): VLAs sized by num_ubos — assumes num_ubos > 0
		 * whenever constlen > constbase.ubo; verify against compiler.
		 */
		uint32_t offsets[params];
		struct pipe_resource *prscs[params];

		for (uint32_t i = 0; i < params; i++) {
			const uint32_t index = i + 1; /* UBOs start at index 1 */
			struct pipe_constant_buffer *cb = &constbuf->cb[index];
			assert(!cb->user_buffer);

			if ((constbuf->enabled_mask & (1 << index)) && cb->buffer) {
				offsets[i] = cb->buffer_offset;
				prscs[i] = cb->buffer;
			} else {
				offsets[i] = 0;
				prscs[i] = NULL;
			}
		}

		fd_wfi(ctx->batch, ring);
		ctx->emit_const_bo(ring, v->type, false, offset * 4, params, prscs, offsets);
	}
}

/* Emit the shader's immediate constants (gathered by the compiler)
 * into the const regs starting at v->constbase.immediate.
 */
static void
emit_immediates(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring)
{
	int size = v->immediates_count;
	uint32_t base = v->constbase.immediate;

	/* truncate size to avoid writing constants that shader
	 * does not use:
	 */
	size = MIN2(size + base, v->constlen) - base;

	/* convert out of vec4: */
	base *= 4;
	size *= 4;

	if (size > 0) {
		fd_wfi(ctx->batch, ring);
		ctx->emit_const(ring, v->type, base,
			0, size, v->immediates[0].val, NULL);
	}
}

/* emit stream-out buffers: */
static void
emit_tfbos(struct fd_context *ctx, const struct ir3_shader_variant *v,
		struct fd_ringbuffer *ring)
{
	/* streamout addresses after driver-params: */
	uint32_t offset = v->constbase.tfbo;
	if (v->constlen > offset) {
		struct fd_streamout_stateobj *so = &ctx->streamout;
		struct pipe_stream_output_info *info = &v->shader->stream_output;
		uint32_t params = 4; /* hw has 4 stream-out buffer slots */
		uint32_t offsets[params];
		struct pipe_resource *prscs[params];

		for (uint32_t i = 0; i < params; i++) {
			struct pipe_stream_output_target *target = so->targets[i];

			if (target) {
				/* byte offset into the buffer already written by
				 * previous draws (offsets[] counts vertices):
				 */
				offsets[i] = (so->offsets[i] * info->stride[i] * 4) +
						target->buffer_offset;
				prscs[i] = target->buffer;
			} else {
				offsets[i] = 0;
				prscs[i] = NULL;
			}
		}

		fd_wfi(ctx->batch, ring);
		ctx->emit_const_bo(ring, v->type, true, offset * 4, params, prscs, offsets);
	}
}

/* Compute the max vertex count before a stream-out buffer would
 * overflow; 0 disables the shader-side stream-out vtxcnt check
 * (a5xx+, binning pass, or no stream-out active).
 */
static uint32_t
max_tf_vtx(struct fd_context *ctx, const struct ir3_shader_variant *v)
{
	struct fd_streamout_stateobj *so = &ctx->streamout;
	struct pipe_stream_output_info *info = &v->shader->stream_output;
	uint32_t maxvtxcnt = 0x7fffffff;

	if (ctx->screen->gpu_id >= 500)
		return 0;
	if (v->key.binning_pass)
		return 0;
	if (v->shader->stream_output.num_outputs == 0)
		return 0;
	if (so->num_targets == 0)
		return 0;

	/* offset to write to is:
	 *
	 *   total_vtxcnt = vtxcnt + offsets[i]
	 *   offset = total_vtxcnt * stride[i]
	 *
	 *   offset =   vtxcnt * stride[i]       ; calculated in shader
	 *            + offsets[i] * stride[i]   ; calculated at emit_tfbos()
	 *
	 * assuming for each vtx, each target buffer will have data written
	 * up to 'offset + stride[i]', that leaves maxvtxcnt as:
	 *
	 *   buffer_size = (maxvtxcnt * stride[i]) + stride[i]
	 *   maxvtxcnt   = (buffer_size - stride[i]) / stride[i]
	 *
	 * but shader is actually doing a less-than (rather than less-than-
	 * equal) check, so we can drop the -stride[i].
	 *
	 * TODO is assumption about `offset + stride[i]` legit?
	 */
	for (unsigned i = 0; i < so->num_targets; i++) {
		struct pipe_stream_output_target *target = so->targets[i];
		unsigned stride = info->stride[i] * 4;   /* convert dwords->bytes */
		/* NOTE(review): assumes stride != 0 for any bound target,
		 * otherwise this divides by zero — verify against state tracker.
		 */
		if (target) {
			uint32_t max = target->buffer_size / stride;
			maxvtxcnt = MIN2(maxvtxcnt, max);
		}
	}

	return maxvtxcnt;
}

/* Top-level const emission for a draw: user consts, UBO pointers,
 * immediates (only when the shader itself changed), driver params,
 * and stream-out buffer addresses, as dictated by 'dirty' flags.
 */
void
ir3_emit_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
		struct fd_context *ctx, const struct pipe_draw_info *info, uint32_t dirty)
{
	if (dirty & (FD_DIRTY_PROG | FD_DIRTY_CONSTBUF)) {
		struct fd_constbuf_stateobj *constbuf;
		bool shader_dirty;

		if (v->type == SHADER_VERTEX) {
			constbuf = &ctx->constbuf[PIPE_SHADER_VERTEX];
			shader_dirty = !!(dirty & FD_SHADER_DIRTY_VP);
		} else if (v->type == SHADER_FRAGMENT) {
			constbuf = &ctx->constbuf[PIPE_SHADER_FRAGMENT];
			shader_dirty = !!(dirty & FD_SHADER_DIRTY_FP);
		} else {
			unreachable("bad shader type");
			return;
		}

		emit_user_consts(ctx, v, ring, constbuf);
		emit_ubos(ctx, v, ring, constbuf);
		if (shader_dirty)
			emit_immediates(ctx, v, ring);
	}

	/* emit driver params every time: */
	/* TODO skip emit if shader doesn't use driver params to avoid WFI.. */
	if (info && (v->type == SHADER_VERTEX)) {
		uint32_t offset = v->constbase.driver_param;
		if (v->constlen > offset) {
			uint32_t vertex_params[IR3_DP_COUNT] = {
				[IR3_DP_VTXID_BASE] = info->indexed ?
						info->index_bias : info->start,
				[IR3_DP_VTXCNT_MAX] = max_tf_vtx(ctx, v),
			};
			/* if no user-clip-planes, we don't need to emit the
			 * entire thing:
			 */
			uint32_t vertex_params_size = 4;

			if (v->key.ucp_enables) {
				struct pipe_clip_state *ucp = &ctx->ucp;
				unsigned pos = IR3_DP_UCP0_X;
				for (unsigned i = 0; pos <= IR3_DP_UCP7_W; i++) {
					for (unsigned j = 0; j < 4; j++) {
						vertex_params[pos] = fui(ucp->ucp[i][j]);
						pos++;
					}
				}
				vertex_params_size = ARRAY_SIZE(vertex_params);
			}

			fd_wfi(ctx->batch, ring);
			ctx->emit_const(ring, SHADER_VERTEX, offset * 4, 0,
					vertex_params_size, vertex_params, NULL);

			/* if needed, emit stream-out buffer addresses: */
			if (vertex_params[IR3_DP_VTXCNT_MAX] > 0) {
				emit_tfbos(ctx, v, ring);
			}
		}
	}
}