1 /* 2 * Copyright 2011 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 */ 23 24 #include "util/register_allocate.h" 25 #include "brw_vec4.h" 26 #include "brw_cfg.h" 27 28 using namespace brw; 29 30 namespace brw { 31 32 static void 33 assign(unsigned int *reg_hw_locations, backend_reg *reg) 34 { 35 if (reg->file == VGRF) { 36 reg->nr = reg_hw_locations[reg->nr] + reg->offset / REG_SIZE; 37 reg->offset %= REG_SIZE; 38 } 39 } 40 41 bool 42 vec4_visitor::reg_allocate_trivial() 43 { 44 unsigned int hw_reg_mapping[this->alloc.count]; 45 bool virtual_grf_used[this->alloc.count]; 46 int next; 47 48 /* Calculate which virtual GRFs are actually in use after whatever 49 * optimization passes have occurred. 50 */ 51 for (unsigned i = 0; i < this->alloc.count; i++) { 52 virtual_grf_used[i] = false; 53 } 54 55 foreach_block_and_inst(block, vec4_instruction, inst, cfg) { 56 if (inst->dst.file == VGRF) 57 virtual_grf_used[inst->dst.nr] = true; 58 59 for (unsigned i = 0; i < 3; i++) { 60 if (inst->src[i].file == VGRF) 61 virtual_grf_used[inst->src[i].nr] = true; 62 } 63 } 64 65 hw_reg_mapping[0] = this->first_non_payload_grf; 66 next = hw_reg_mapping[0] + this->alloc.sizes[0]; 67 for (unsigned i = 1; i < this->alloc.count; i++) { 68 if (virtual_grf_used[i]) { 69 hw_reg_mapping[i] = next; 70 next += this->alloc.sizes[i]; 71 } 72 } 73 prog_data->total_grf = next; 74 75 foreach_block_and_inst(block, vec4_instruction, inst, cfg) { 76 assign(hw_reg_mapping, &inst->dst); 77 assign(hw_reg_mapping, &inst->src[0]); 78 assign(hw_reg_mapping, &inst->src[1]); 79 assign(hw_reg_mapping, &inst->src[2]); 80 } 81 82 if (prog_data->total_grf > max_grf) { 83 fail("Ran out of regs on trivial allocator (%d/%d)\n", 84 prog_data->total_grf, max_grf); 85 return false; 86 } 87 88 return true; 89 } 90 91 extern "C" void 92 brw_vec4_alloc_reg_set(struct brw_compiler *compiler) 93 { 94 int base_reg_count = 95 compiler->devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF; 96 97 /* After running split_virtual_grfs(), almost all VGRFs will be of size 1. 98 * SEND-from-GRF sources cannot be split, so we also need classes for each 99 * potential message length. 100 */ 101 const int class_count = MAX_VGRF_SIZE; 102 int class_sizes[MAX_VGRF_SIZE]; 103 104 for (int i = 0; i < class_count; i++) 105 class_sizes[i] = i + 1; 106 107 /* Compute the total number of registers across all classes. */ 108 int ra_reg_count = 0; 109 for (int i = 0; i < class_count; i++) { 110 ra_reg_count += base_reg_count - (class_sizes[i] - 1); 111 } 112 113 ralloc_free(compiler->vec4_reg_set.ra_reg_to_grf); 114 compiler->vec4_reg_set.ra_reg_to_grf = ralloc_array(compiler, uint8_t, ra_reg_count); 115 ralloc_free(compiler->vec4_reg_set.regs); 116 compiler->vec4_reg_set.regs = ra_alloc_reg_set(compiler, ra_reg_count, false); 117 if (compiler->devinfo->gen >= 6) 118 ra_set_allocate_round_robin(compiler->vec4_reg_set.regs); 119 ralloc_free(compiler->vec4_reg_set.classes); 120 compiler->vec4_reg_set.classes = ralloc_array(compiler, int, class_count); 121 122 /* Now, add the registers to their classes, and add the conflicts 123 * between them and the base GRF registers (and also each other). 124 */ 125 int reg = 0; 126 unsigned *q_values[MAX_VGRF_SIZE]; 127 for (int i = 0; i < class_count; i++) { 128 int class_reg_count = base_reg_count - (class_sizes[i] - 1); 129 compiler->vec4_reg_set.classes[i] = ra_alloc_reg_class(compiler->vec4_reg_set.regs); 130 131 q_values[i] = new unsigned[MAX_VGRF_SIZE]; 132 133 for (int j = 0; j < class_reg_count; j++) { 134 ra_class_add_reg(compiler->vec4_reg_set.regs, compiler->vec4_reg_set.classes[i], reg); 135 136 compiler->vec4_reg_set.ra_reg_to_grf[reg] = j; 137 138 for (int base_reg = j; 139 base_reg < j + class_sizes[i]; 140 base_reg++) { 141 ra_add_reg_conflict(compiler->vec4_reg_set.regs, base_reg, reg); 142 } 143 144 reg++; 145 } 146 147 for (int j = 0; j < class_count; j++) { 148 /* Calculate the q values manually because the algorithm used by 149 * ra_set_finalize() to do it has higher complexity affecting the 150 * start-up time of some applications. q(i, j) is just the maximum 151 * number of registers from class i a register from class j can 152 * conflict with. 153 */ 154 q_values[i][j] = class_sizes[i] + class_sizes[j] - 1; 155 } 156 } 157 assert(reg == ra_reg_count); 158 159 for (int reg = 0; reg < base_reg_count; reg++) 160 ra_make_reg_conflicts_transitive(compiler->vec4_reg_set.regs, reg); 161 162 ra_set_finalize(compiler->vec4_reg_set.regs, q_values); 163 164 for (int i = 0; i < MAX_VGRF_SIZE; i++) 165 delete[] q_values[i]; 166 } 167 168 void 169 vec4_visitor::setup_payload_interference(struct ra_graph *g, 170 int first_payload_node, 171 int reg_node_count) 172 { 173 int payload_node_count = this->first_non_payload_grf; 174 175 for (int i = 0; i < payload_node_count; i++) { 176 /* Mark each payload reg node as being allocated to its physical register. 177 * 178 * The alternative would be to have per-physical register classes, which 179 * would just be silly. 180 */ 181 ra_set_node_reg(g, first_payload_node + i, i); 182 183 /* For now, just mark each payload node as interfering with every other 184 * node to be allocated. 185 */ 186 for (int j = 0; j < reg_node_count; j++) { 187 ra_add_node_interference(g, first_payload_node + i, j); 188 } 189 } 190 } 191 192 bool 193 vec4_visitor::reg_allocate() 194 { 195 unsigned int hw_reg_mapping[alloc.count]; 196 int payload_reg_count = this->first_non_payload_grf; 197 198 /* Using the trivial allocator can be useful in debugging undefined 199 * register access as a result of broken optimization passes. 200 */ 201 if (0) 202 return reg_allocate_trivial(); 203 204 calculate_live_intervals(); 205 206 int node_count = alloc.count; 207 int first_payload_node = node_count; 208 node_count += payload_reg_count; 209 struct ra_graph *g = 210 ra_alloc_interference_graph(compiler->vec4_reg_set.regs, node_count); 211 212 for (unsigned i = 0; i < alloc.count; i++) { 213 int size = this->alloc.sizes[i]; 214 assert(size >= 1 && size <= MAX_VGRF_SIZE); 215 ra_set_node_class(g, i, compiler->vec4_reg_set.classes[size - 1]); 216 217 for (unsigned j = 0; j < i; j++) { 218 if (virtual_grf_interferes(i, j)) { 219 ra_add_node_interference(g, i, j); 220 } 221 } 222 } 223 224 /* Certain instructions can't safely use the same register for their 225 * sources and destination. Add interference. 226 */ 227 foreach_block_and_inst(block, vec4_instruction, inst, cfg) { 228 if (inst->dst.file == VGRF && inst->has_source_and_destination_hazard()) { 229 for (unsigned i = 0; i < 3; i++) { 230 if (inst->src[i].file == VGRF) { 231 ra_add_node_interference(g, inst->dst.nr, inst->src[i].nr); 232 } 233 } 234 } 235 } 236 237 setup_payload_interference(g, first_payload_node, node_count); 238 239 if (!ra_allocate(g)) { 240 /* Failed to allocate registers. Spill a reg, and the caller will 241 * loop back into here to try again. 242 */ 243 int reg = choose_spill_reg(g); 244 if (this->no_spills) { 245 fail("Failure to register allocate. Reduce number of live " 246 "values to avoid this."); 247 } else if (reg == -1) { 248 fail("no register to spill\n"); 249 } else { 250 spill_reg(reg); 251 } 252 ralloc_free(g); 253 return false; 254 } 255 256 /* Get the chosen virtual registers for each node, and map virtual 257 * regs in the register classes back down to real hardware reg 258 * numbers. 259 */ 260 prog_data->total_grf = payload_reg_count; 261 for (unsigned i = 0; i < alloc.count; i++) { 262 int reg = ra_get_node_reg(g, i); 263 264 hw_reg_mapping[i] = compiler->vec4_reg_set.ra_reg_to_grf[reg]; 265 prog_data->total_grf = MAX2(prog_data->total_grf, 266 hw_reg_mapping[i] + alloc.sizes[i]); 267 } 268 269 foreach_block_and_inst(block, vec4_instruction, inst, cfg) { 270 assign(hw_reg_mapping, &inst->dst); 271 assign(hw_reg_mapping, &inst->src[0]); 272 assign(hw_reg_mapping, &inst->src[1]); 273 assign(hw_reg_mapping, &inst->src[2]); 274 } 275 276 ralloc_free(g); 277 278 return true; 279 } 280 281 /** 282 * When we decide to spill a register, instead of blindly spilling every use, 283 * save unspills when the spill register is used (read) in consecutive 284 * instructions. This can potentially save a bunch of unspills that would 285 * have very little impact in register allocation anyway. 286 * 287 * Notice that we need to account for this behavior when spilling a register 288 * and when evaluating spilling costs. This function is designed so it can 289 * be called from both places and avoid repeating the logic. 290 * 291 * - When we call this function from spill_reg(), we pass in scratch_reg the 292 * actual unspill/spill register that we want to reuse in the current 293 * instruction. 294 * 295 * - When we call this from evaluate_spill_costs(), we pass the register for 296 * which we are evaluating spilling costs. 297 * 298 * In either case, we check if the previous instructions read scratch_reg until 299 * we find one that writes to it with a compatible mask or does not read/write 300 * scratch_reg at all. 301 */ 302 static bool 303 can_use_scratch_for_source(const vec4_instruction *inst, unsigned i, 304 unsigned scratch_reg) 305 { 306 assert(inst->src[i].file == VGRF); 307 bool prev_inst_read_scratch_reg = false; 308 309 /* See if any previous source in the same instructions reads scratch_reg */ 310 for (unsigned n = 0; n < i; n++) { 311 if (inst->src[n].file == VGRF && inst->src[n].nr == scratch_reg) 312 prev_inst_read_scratch_reg = true; 313 } 314 315 /* Now check if previous instructions read/write scratch_reg */ 316 for (vec4_instruction *prev_inst = (vec4_instruction *) inst->prev; 317 !prev_inst->is_head_sentinel(); 318 prev_inst = (vec4_instruction *) prev_inst->prev) { 319 320 /* If the previous instruction writes to scratch_reg then we can reuse 321 * it if the write is not conditional and the channels we write are 322 * compatible with our read mask 323 */ 324 if (prev_inst->dst.file == VGRF && prev_inst->dst.nr == scratch_reg) { 325 return (!prev_inst->predicate || prev_inst->opcode == BRW_OPCODE_SEL) && 326 (brw_mask_for_swizzle(inst->src[i].swizzle) & 327 ~prev_inst->dst.writemask) == 0; 328 } 329 330 /* Skip scratch read/writes so that instructions generated by spilling 331 * other registers (that won't read/write scratch_reg) do not stop us from 332 * reusing scratch_reg for this instruction. 333 */ 334 if (prev_inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_WRITE || 335 prev_inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_READ) 336 continue; 337 338 /* If the previous instruction does not write to scratch_reg, then check 339 * if it reads it 340 */ 341 int n; 342 for (n = 0; n < 3; n++) { 343 if (prev_inst->src[n].file == VGRF && 344 prev_inst->src[n].nr == scratch_reg) { 345 prev_inst_read_scratch_reg = true; 346 break; 347 } 348 } 349 if (n == 3) { 350 /* The previous instruction does not read scratch_reg. At this point, 351 * if no previous instruction has read scratch_reg it means that we 352 * will need to unspill it here and we can't reuse it (so we return 353 * false). Otherwise, if we found at least one consecutive instruction 354 * that read scratch_reg, then we know that we got here from 355 * evaluate_spill_costs (since for the spill_reg path any block of 356 * consecutive instructions using scratch_reg must start with a write 357 * to that register, so we would've exited the loop in the check for 358 * the write that we have at the start of this loop), and in that case 359 * it means that we found the point at which the scratch_reg would be 360 * unspilled. Since we always unspill a full vec4, it means that we 361 * have all the channels available and we can just return true to 362 * signal that we can reuse the register in the current instruction 363 * too. 364 */ 365 return prev_inst_read_scratch_reg; 366 } 367 } 368 369 return prev_inst_read_scratch_reg; 370 } 371 372 static inline unsigned 373 spill_cost_for_type(enum brw_reg_type type) 374 { 375 /* Spilling of a 64-bit register involves emitting 2 32-bit scratch 376 * messages plus the 64b/32b shuffling code. 377 */ 378 return type_sz(type) == 8 ? 2.25f : 1.0f; 379 } 380 381 void 382 vec4_visitor::evaluate_spill_costs(float *spill_costs, bool *no_spill) 383 { 384 float loop_scale = 1.0; 385 386 unsigned *reg_type_size = (unsigned *) 387 ralloc_size(NULL, this->alloc.count * sizeof(unsigned)); 388 389 for (unsigned i = 0; i < this->alloc.count; i++) { 390 spill_costs[i] = 0.0; 391 no_spill[i] = alloc.sizes[i] != 1 && alloc.sizes[i] != 2; 392 reg_type_size[i] = 0; 393 } 394 395 /* Calculate costs for spilling nodes. Call it a cost of 1 per 396 * spill/unspill we'll have to do, and guess that the insides of 397 * loops run 10 times. 398 */ 399 foreach_block_and_inst(block, vec4_instruction, inst, cfg) { 400 for (unsigned int i = 0; i < 3; i++) { 401 if (inst->src[i].file == VGRF && !no_spill[inst->src[i].nr]) { 402 /* We will only unspill src[i] it it wasn't unspilled for the 403 * previous instruction, in which case we'll just reuse the scratch 404 * reg for this instruction. 405 */ 406 if (!can_use_scratch_for_source(inst, i, inst->src[i].nr)) { 407 spill_costs[inst->src[i].nr] += 408 loop_scale * spill_cost_for_type(inst->src[i].type); 409 if (inst->src[i].reladdr || 410 inst->src[i].offset >= REG_SIZE) 411 no_spill[inst->src[i].nr] = true; 412 413 /* We don't support unspills of partial DF reads. 414 * 415 * Our 64-bit unspills are implemented with two 32-bit scratch 416 * messages, each one reading that for both SIMD4x2 threads that 417 * we need to shuffle into correct 64-bit data. Ensure that we 418 * are reading data for both threads. 419 */ 420 if (type_sz(inst->src[i].type) == 8 && inst->exec_size != 8) 421 no_spill[inst->src[i].nr] = true; 422 } 423 424 /* We can't spill registers that mix 32-bit and 64-bit access (that 425 * contain 64-bit data that is operated on via 32-bit instructions) 426 */ 427 unsigned type_size = type_sz(inst->src[i].type); 428 if (reg_type_size[inst->src[i].nr] == 0) 429 reg_type_size[inst->src[i].nr] = type_size; 430 else if (reg_type_size[inst->src[i].nr] != type_size) 431 no_spill[inst->src[i].nr] = true; 432 } 433 } 434 435 if (inst->dst.file == VGRF && !no_spill[inst->dst.nr]) { 436 spill_costs[inst->dst.nr] += 437 loop_scale * spill_cost_for_type(inst->dst.type); 438 if (inst->dst.reladdr || inst->dst.offset >= REG_SIZE) 439 no_spill[inst->dst.nr] = true; 440 441 /* We don't support spills of partial DF writes. 442 * 443 * Our 64-bit spills are implemented with two 32-bit scratch messages, 444 * each one writing that for both SIMD4x2 threads. Ensure that we 445 * are writing data for both threads. 446 */ 447 if (type_sz(inst->dst.type) == 8 && inst->exec_size != 8) 448 no_spill[inst->dst.nr] = true; 449 450 /* FROM_DOUBLE opcodes are setup so that they use a dst register 451 * with a size of 2 even if they only produce a single-precison 452 * result (this is so that the opcode can use the larger register to 453 * produce a 64-bit aligned intermediary result as required by the 454 * hardware during the conversion process). This creates a problem for 455 * spilling though, because when we attempt to emit a spill for the 456 * dst we see a 32-bit destination and emit a scratch write that 457 * allocates a single spill register. 458 */ 459 if (inst->opcode == VEC4_OPCODE_FROM_DOUBLE) 460 no_spill[inst->dst.nr] = true; 461 462 /* We can't spill registers that mix 32-bit and 64-bit access (that 463 * contain 64-bit data that is operated on via 32-bit instructions) 464 */ 465 unsigned type_size = type_sz(inst->dst.type); 466 if (reg_type_size[inst->dst.nr] == 0) 467 reg_type_size[inst->dst.nr] = type_size; 468 else if (reg_type_size[inst->dst.nr] != type_size) 469 no_spill[inst->dst.nr] = true; 470 } 471 472 switch (inst->opcode) { 473 474 case BRW_OPCODE_DO: 475 loop_scale *= 10; 476 break; 477 478 case BRW_OPCODE_WHILE: 479 loop_scale /= 10; 480 break; 481 482 case SHADER_OPCODE_GEN4_SCRATCH_READ: 483 case SHADER_OPCODE_GEN4_SCRATCH_WRITE: 484 for (int i = 0; i < 3; i++) { 485 if (inst->src[i].file == VGRF) 486 no_spill[inst->src[i].nr] = true; 487 } 488 if (inst->dst.file == VGRF) 489 no_spill[inst->dst.nr] = true; 490 break; 491 492 default: 493 break; 494 } 495 } 496 497 ralloc_free(reg_type_size); 498 } 499 500 int 501 vec4_visitor::choose_spill_reg(struct ra_graph *g) 502 { 503 float spill_costs[this->alloc.count]; 504 bool no_spill[this->alloc.count]; 505 506 evaluate_spill_costs(spill_costs, no_spill); 507 508 for (unsigned i = 0; i < this->alloc.count; i++) { 509 if (!no_spill[i]) 510 ra_set_node_spill_cost(g, i, spill_costs[i]); 511 } 512 513 return ra_get_best_spill_node(g); 514 } 515 516 void 517 vec4_visitor::spill_reg(int spill_reg_nr) 518 { 519 assert(alloc.sizes[spill_reg_nr] == 1 || alloc.sizes[spill_reg_nr] == 2); 520 unsigned int spill_offset = last_scratch; 521 last_scratch += alloc.sizes[spill_reg_nr]; 522 523 /* Generate spill/unspill instructions for the objects being spilled. */ 524 int scratch_reg = -1; 525 foreach_block_and_inst(block, vec4_instruction, inst, cfg) { 526 for (unsigned int i = 0; i < 3; i++) { 527 if (inst->src[i].file == VGRF && inst->src[i].nr == spill_reg_nr) { 528 if (scratch_reg == -1 || 529 !can_use_scratch_for_source(inst, i, scratch_reg)) { 530 /* We need to unspill anyway so make sure we read the full vec4 531 * in any case. This way, the cached register can be reused 532 * for consecutive instructions that read different channels of 533 * the same vec4. 534 */ 535 scratch_reg = alloc.allocate(alloc.sizes[spill_reg_nr]); 536 src_reg temp = inst->src[i]; 537 temp.nr = scratch_reg; 538 temp.offset = 0; 539 temp.swizzle = BRW_SWIZZLE_XYZW; 540 emit_scratch_read(block, inst, 541 dst_reg(temp), inst->src[i], spill_offset); 542 temp.offset = inst->src[i].offset; 543 } 544 assert(scratch_reg != -1); 545 inst->src[i].nr = scratch_reg; 546 } 547 } 548 549 if (inst->dst.file == VGRF && inst->dst.nr == spill_reg_nr) { 550 emit_scratch_write(block, inst, spill_offset); 551 scratch_reg = inst->dst.nr; 552 } 553 } 554 555 invalidate_live_intervals(); 556 } 557 558 } /* namespace brw */ 559