/*
 * Copyright © 2010 Intel Corporation
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * @file vc4_qir_schedule.c
 *
 * The basic model of the list scheduler is to take a basic block, compute a
 * DAG of the dependencies from the bottom up, and make a list of the DAG
 * heads.  Heuristically pick a DAG head and schedule (remove) it, then put
 * all the parents that are now DAG heads into the list of things to
 * schedule.
 *
 * The goal of scheduling here, before register allocation and conversion to
 * QPU instructions, is to reduce register pressure by reordering instructions
 * to consume values as soon as possible after they are produced.
 */
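
/* Illustrative sketch of the model above (hypothetical QIR, not taken from a
 * real shader): for a block like
 *
 *     t0 = load_vary
 *     t1 = fmul t0, uni
 *     t2 = fadd t1, t0
 *     tlb_color_write t2
 *
 * the bottom-up DAG has the tlb_color_write as its only head, the fadd as
 * that head's child, and so on down to the load_vary.  Scheduling pops a
 * head, emits it, and promotes any child whose parent_count drops to zero,
 * so the block is rebuilt from the bottom up while choose_instruction()
 * picks among the ready nodes.
 */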

#include "vc4_qir.h"

static bool debug;

struct schedule_node {
        struct list_head link;
        struct qinst *inst;

        struct schedule_node **children;
        uint32_t child_count;
        uint32_t child_array_size;
        uint32_t parent_count;

        /* Length of the longest (latency) chain from a DAG head to this
         * instruction.
         */
        uint32_t delay;

        /* Time at which this node is unblocked: the maximum over all DAG
         * parents of the parent's schedule time plus
         * latency_between(this, parent).
         */
        uint32_t unblocked_time;
};

struct schedule_state {
        /* List of struct schedule_node *.  This starts out with all
         * instructions, and after dependency updates it's trimmed to be just
         * the DAG heads.
         */
        struct list_head worklist;

        uint32_t time;

        uint32_t *temp_writes;

        BITSET_WORD *temp_live;
};

/* When walking the instructions in reverse, we need to swap before/after in
 * add_dep().
 */
enum direction { F, R };

/**
 * Marks a dependency between two instructions: \p after must appear after
 * \p before.
 *
 * Our dependencies are tracked as a DAG.  Since we're scheduling bottom-up,
 * the latest instructions with nothing left to schedule are the DAG heads,
 * and their inputs are their children.
 */
static void
add_dep(enum direction dir,
        struct schedule_node *before,
        struct schedule_node *after)
{
        if (!before || !after)
                return;

        assert(before != after);

        if (dir == R) {
                struct schedule_node *t = before;
                before = after;
                after = t;
        }

        for (int i = 0; i < after->child_count; i++) {
                if (after->children[i] == before)
                        return;
        }

        if (after->child_array_size <= after->child_count) {
                after->child_array_size = MAX2(after->child_array_size * 2, 16);
                after->children = reralloc(after, after->children,
                                           struct schedule_node *,
                                           after->child_array_size);
        }

        after->children[after->child_count] = before;
        after->child_count++;
        before->parent_count++;
}

static void
add_write_dep(enum direction dir,
              struct schedule_node **before,
              struct schedule_node *after)
{
        add_dep(dir, *before, after);
        *before = after;
}
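
/* Sketch of how the helper above chains write dependencies (hypothetical
 * nodes a, b and c writing the same resource):
 *
 *     add_write_dep(dir, &last_write, a);   // last_write: NULL -> a
 *     add_write_dep(dir, &last_write, b);   // edge a -> b, last_write = b
 *     add_write_dep(dir, &last_write, c);   // edge b -> c, last_write = c
 *
 * Each new writer gains a dependency on the previous one and then becomes
 * the tracked "last writer" for whatever comes next.
 */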

struct schedule_setup_state {
        struct schedule_node **last_temp_write;
        struct schedule_node *last_sf;
        struct schedule_node *last_vary_read;
        struct schedule_node *last_vpm_read;
        struct schedule_node *last_vpm_write;
        struct schedule_node *last_tex_coord;
        struct schedule_node *last_tex_result;
        struct schedule_node *last_tlb;
        struct schedule_node *last_uniforms_reset;
        enum direction dir;

        /**
         * Texture FIFO tracking.  This is done top-to-bottom, and is used to
         * track the QOP_TEX_RESULTs and add dependencies on previous ones
         * when trying to submit texture coords with TFREQ full or new texture
         * fetches with TFRCV full.
         */
        struct {
                struct schedule_node *node;
                int coords;
        } tex_fifo[8];
        int tfreq_count; /**< Number of texture coords outstanding. */
        int tfrcv_count; /**< Number of texture results outstanding. */
        int tex_fifo_pos;
};

static void
block_until_tex_result(struct schedule_setup_state *state, struct schedule_node *n)
{
        add_dep(state->dir, state->tex_fifo[0].node, n);

        state->tfreq_count -= state->tex_fifo[0].coords;
        state->tfrcv_count--;

        memmove(&state->tex_fifo[0],
                &state->tex_fifo[1],
                state->tex_fifo_pos * sizeof(state->tex_fifo[0]));
        state->tex_fifo_pos--;
}
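
/* Worked example of the FIFO accounting above (hypothetical counts): if
 * tex_fifo[] holds three entries and tex_fifo[0] queued two coordinate
 * writes, blocking on its QOP_TEX_RESULT subtracts those two coords from
 * tfreq_count, retires one outstanding result from tfrcv_count, and shifts
 * the remaining entries down so tex_fifo[0] is the next result to wait on.
 */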

/**
 * Common code for dependencies that need to be tracked both forward and
 * backward.
 *
 * This is for things like "all VPM reads have to happen in order."
 */
static void
calculate_deps(struct schedule_setup_state *state, struct schedule_node *n)
{
        struct qinst *inst = n->inst;
        enum direction dir = state->dir;

        /* Add deps for temp register and varying accesses.  Note that we
         * ignore uniform accesses, because qir_reorder_uniforms() happens
         * after this.
         */
        for (int i = 0; i < qir_get_nsrc(inst); i++) {
                switch (inst->src[i].file) {
                case QFILE_TEMP:
                        add_dep(dir,
                                state->last_temp_write[inst->src[i].index], n);
                        break;

                case QFILE_VARY:
                        add_write_dep(dir, &state->last_vary_read, n);
                        break;

                case QFILE_VPM:
                        add_write_dep(dir, &state->last_vpm_read, n);
                        break;

                default:
                        break;
                }
        }

        switch (inst->op) {
        case QOP_VARY_ADD_C:
                add_dep(dir, state->last_vary_read, n);
                break;

        case QOP_TEX_RESULT:
                /* Results have to be fetched in order. */
                add_write_dep(dir, &state->last_tex_result, n);
                break;

        case QOP_THRSW:
                /* After a new THRSW, one must collect all texture samples
                 * queued since the previous THRSW/program start.  For now, we
                 * have one THRSW in between each texture setup and its
                 * results collection as our input, and we just make sure that
                 * ordering is maintained.
                 */
                add_write_dep(dir, &state->last_tex_coord, n);
                add_write_dep(dir, &state->last_tex_result, n);

                /* Accumulators and flags are lost across thread switches. */
                add_write_dep(dir, &state->last_sf, n);

                /* Setup, like the varyings, will need to be drained before we
                 * thread switch.
                 */
                add_write_dep(dir, &state->last_vary_read, n);

                /* The TLB-locking operations have to stay after the last
                 * thread switch.
                 */
                add_write_dep(dir, &state->last_tlb, n);
                break;

        case QOP_TLB_COLOR_READ:
        case QOP_MS_MASK:
                add_write_dep(dir, &state->last_tlb, n);
                break;

        default:
                break;
        }

        switch (inst->dst.file) {
        case QFILE_VPM:
                add_write_dep(dir, &state->last_vpm_write, n);
                break;

        case QFILE_TEMP:
                add_write_dep(dir, &state->last_temp_write[inst->dst.index], n);
                break;

        case QFILE_TLB_COLOR_WRITE:
        case QFILE_TLB_COLOR_WRITE_MS:
        case QFILE_TLB_Z_WRITE:
        case QFILE_TLB_STENCIL_SETUP:
                add_write_dep(dir, &state->last_tlb, n);
                break;

        case QFILE_TEX_S_DIRECT:
        case QFILE_TEX_S:
        case QFILE_TEX_T:
        case QFILE_TEX_R:
        case QFILE_TEX_B:
                /* Texturing setup gets scheduled in order, because
                 * the uniforms referenced by them have to land in a
                 * specific order.
                 */
                add_write_dep(dir, &state->last_tex_coord, n);
                break;

        default:
                break;
        }

        if (qir_depends_on_flags(inst))
                add_dep(dir, state->last_sf, n);

        if (inst->sf)
                add_write_dep(dir, &state->last_sf, n);
}

static void
calculate_forward_deps(struct vc4_compile *c, void *mem_ctx,
                       struct list_head *schedule_list)
{
        struct schedule_setup_state state;

        memset(&state, 0, sizeof(state));
        state.last_temp_write = rzalloc_array(mem_ctx, struct schedule_node *,
                                              c->num_temps);
        state.dir = F;

        list_for_each_entry(struct schedule_node, n, schedule_list, link) {
                struct qinst *inst = n->inst;

                calculate_deps(&state, n);

                for (int i = 0; i < qir_get_nsrc(inst); i++) {
                        switch (inst->src[i].file) {
                        case QFILE_UNIF:
                                add_dep(state.dir, state.last_uniforms_reset, n);
                                break;
                        default:
                                break;
                        }
                }

                switch (inst->dst.file) {
                case QFILE_TEX_S_DIRECT:
                case QFILE_TEX_S:
                case QFILE_TEX_T:
                case QFILE_TEX_R:
                case QFILE_TEX_B:
                        /* From the VC4 spec:
                         *
                         *     "The TFREQ input FIFO holds two full lots of s,
                         *      t, r, b data, plus associated setup data, per
                         *      QPU, that is, there are eight data slots. For
                         *      each texture request, slots are only consumed
                         *      for the components of s, t, r, and b actually
                         *      written. Thus the FIFO can hold four requests
                         *      of just (s, t) data, or eight requests of just
                         *      s data (for direct addressed data lookups).
                         *
                         *      Note that there is one FIFO per QPU, and the
                         *      FIFO has no concept of threads - that is,
                         *      multi-threaded shaders must be careful to use
                         *      only 1/2 the FIFO depth before reading
                         *      back. Multi-threaded programs must also
                         *      therefore always thread switch on texture
                         *      fetch as the other thread may have data
                         *      waiting in the FIFO."
                         *
                         * If the texture coordinate fifo is full, block this
                         * on the last QOP_TEX_RESULT.
                         */
                        if (state.tfreq_count == (c->fs_threaded ? 4 : 8)) {
                                block_until_tex_result(&state, n);
                        }

                        /* From the VC4 spec:
                         *
                         *     "Since the maximum number of texture requests
                         *      in the input (TFREQ) FIFO is four lots of (s,
                         *      t) data, the output (TFRCV) FIFO is sized to
                         *      hold four lots of max-size color data per
                         *      QPU. For non-float color, reads are packed
                         *      RGBA8888 data (one read per pixel). For 16-bit
                         *      float color, two reads are necessary per
                         *      pixel, with reads packed as RG1616 then
                         *      BA1616. So per QPU there are eight color slots
                         *      in the TFRCV FIFO."
                         *
                         * If the texture result fifo is full, block adding
                         * any more to it until the last QOP_TEX_RESULT.
                         */
                        if (inst->dst.file == QFILE_TEX_S ||
                            inst->dst.file == QFILE_TEX_S_DIRECT) {
                                if (state.tfrcv_count ==
                                    (c->fs_threaded ? 2 : 4))
                                        block_until_tex_result(&state, n);
                                state.tfrcv_count++;
                        }

                        state.tex_fifo[state.tex_fifo_pos].coords++;
                        state.tfreq_count++;
                        break;

                default:
                        break;
                }

                switch (inst->op) {
                case QOP_TEX_RESULT:
                        /* Results have to be fetched after the
                         * coordinate setup.  Note that we're assuming
                         * here that our input shader has the texture
                         * coord setup and result fetch in order,
                         * which is true initially but not of our
                         * instruction stream after this pass.
                         */
                        add_dep(state.dir, state.last_tex_coord, n);

                        state.tex_fifo[state.tex_fifo_pos].node = n;

                        state.tex_fifo_pos++;
                        memset(&state.tex_fifo[state.tex_fifo_pos], 0,
                               sizeof(state.tex_fifo[0]));
                        break;

                case QOP_UNIFORMS_RESET:
                        add_write_dep(state.dir, &state.last_uniforms_reset, n);
                        break;

                default:
                        break;
                }
        }
}

static void
calculate_reverse_deps(struct vc4_compile *c, void *mem_ctx,
                       struct list_head *schedule_list)
{
        struct schedule_setup_state state;

        memset(&state, 0, sizeof(state));
        state.dir = R;
        state.last_temp_write = rzalloc_array(mem_ctx, struct schedule_node *,
                                              c->num_temps);

        list_for_each_entry_rev(struct schedule_node, n, schedule_list, link) {
                calculate_deps(&state, n);
        }
}

static int
get_register_pressure_cost(struct schedule_state *state, struct qinst *inst)
{
        int cost = 0;

        if (inst->dst.file == QFILE_TEMP &&
            state->temp_writes[inst->dst.index] == 1)
                cost--;

        for (int i = 0; i < qir_get_nsrc(inst); i++) {
                if (inst->src[i].file != QFILE_TEMP ||
                    BITSET_TEST(state->temp_live, inst->src[i].index)) {
                        continue;
                }

                bool already_counted = false;
                for (int j = 0; j < i; j++) {
                        if (inst->src[i].file == inst->src[j].file &&
                            inst->src[i].index == inst->src[j].index) {
                                already_counted = true;
                        }
                }
                if (!already_counted)
                        cost++;
        }

        return cost;
}
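
/* Worked example of the cost above (hypothetical instruction): for
 * "t2 = fadd t0, t1" where this is the only remaining write of t2, t0 is not
 * yet live and t1 already is, the cost is -1 (t2's last write retires) + 1
 * (t0 becomes live) + 0 (t1 was live already) = 0.  A negative cost means
 * scheduling the instruction shrinks the set of live temps.
 */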

static bool
locks_scoreboard(struct qinst *inst)
{
        if (inst->op == QOP_TLB_COLOR_READ)
                return true;

        switch (inst->dst.file) {
        case QFILE_TLB_Z_WRITE:
        case QFILE_TLB_COLOR_WRITE:
        case QFILE_TLB_COLOR_WRITE_MS:
                return true;
        default:
                return false;
        }
}

static struct schedule_node *
choose_instruction(struct schedule_state *state)
{
        struct schedule_node *chosen = NULL;

        list_for_each_entry(struct schedule_node, n, &state->worklist, link) {
                /* The branches aren't being tracked as dependencies.  Make
                 * sure that they stay scheduled as the last instruction of
                 * the block, which is to say the first one we choose to
                 * schedule.
                 */
                if (n->inst->op == QOP_BRANCH)
                        return n;

                if (!chosen) {
                        chosen = n;
                        continue;
                }

                /* Prefer scheduling things that lock the scoreboard, so that
                 * they appear late in the program and we get more parallelism
                 * between shaders on multiple QPUs hitting the same fragment.
                 */
                if (locks_scoreboard(n->inst) &&
                    !locks_scoreboard(chosen->inst)) {
                        chosen = n;
                        continue;
                } else if (!locks_scoreboard(n->inst) &&
                           locks_scoreboard(chosen->inst)) {
                        continue;
                }

                /* If we would block on the previously chosen node, but would
                 * block less on this one, then prefer it.
                 */
                if (chosen->unblocked_time > state->time &&
                    n->unblocked_time < chosen->unblocked_time) {
                        chosen = n;
                        continue;
                } else if (n->unblocked_time > state->time &&
                           n->unblocked_time > chosen->unblocked_time) {
                        continue;
                }

                /* If we can definitely reduce register pressure, do so
                 * immediately.
                 */
                int register_pressure_cost =
                        get_register_pressure_cost(state, n->inst);
                int chosen_register_pressure_cost =
                        get_register_pressure_cost(state, chosen->inst);

                if (register_pressure_cost < chosen_register_pressure_cost) {
                        chosen = n;
                        continue;
                } else if (register_pressure_cost >
                           chosen_register_pressure_cost) {
                        continue;
                }

                /* Otherwise, prefer instructions with the deepest chain to
                 * the end of the program.  This avoids the problem of
                 * "everything generates a temp, nothing finishes freeing one,
                 * guess I'll just keep emitting varying mul/adds".
                 */
                if (n->delay > chosen->delay) {
                        chosen = n;
                        continue;
                } else if (n->delay < chosen->delay) {
                        continue;
                }
        }

        return chosen;
}
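
/* The tie-breaking above runs in a fixed order (this is just a summary, not
 * an extra heuristic): branches first, then scoreboard-locking ops, then the
 * node that would stall the least past state->time, then the lower
 * register-pressure cost, then the deeper delay chain.  Two ready nodes with
 * equal cost, for example, are ordered purely by their delay values.
 */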

static void
dump_state(struct vc4_compile *c, struct schedule_state *state)
{
        uint32_t i = 0;
        list_for_each_entry(struct schedule_node, n, &state->worklist, link) {
                fprintf(stderr, "%3d: ", i++);
                qir_dump_inst(c, n->inst);
                fprintf(stderr, " (%d cost)\n",
                        get_register_pressure_cost(state, n->inst));

                for (int j = 0; j < n->child_count; j++) {
                        struct schedule_node *child = n->children[j];
                        fprintf(stderr, "   - ");
                        qir_dump_inst(c, child->inst);
                        fprintf(stderr, " (%d parents)\n", child->parent_count);
                }
        }
}

/* Estimate of how many instructions we should schedule between operations.
 *
 * These aren't in real cycle counts, because we're just estimating cycle
 * times anyway.  QIR instructions will get paired up when turned into QPU
 * instructions, or extra NOP delays will have to be added due to register
 * allocation choices.
 */
static uint32_t
latency_between(struct schedule_node *before, struct schedule_node *after)
{
        if ((before->inst->dst.file == QFILE_TEX_S ||
             before->inst->dst.file == QFILE_TEX_S_DIRECT) &&
            after->inst->op == QOP_TEX_RESULT)
                return 100;

        switch (before->inst->op) {
        case QOP_RCP:
        case QOP_RSQ:
        case QOP_EXP2:
        case QOP_LOG2:
                for (int i = 0; i < qir_get_nsrc(after->inst); i++) {
                        if (after->inst->src[i].file ==
                            before->inst->dst.file &&
                            after->inst->src[i].index ==
                            before->inst->dst.index) {
                                /* There are two QPU delay slots before we can
                                 * read a math result, which could be up to 4
                                 * QIR instructions if they packed well.
                                 */
                                return 4;
                        }
                }
                break;
        default:
                break;
        }

        return 1;
}
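
/* Worked example of the estimates above (hypothetical nodes): a QFILE_TEX_S
 * write followed by the QOP_TEX_RESULT that consumes it reports 100, pushing
 * the result fetch far away from the coordinate write; a QOP_RCP whose
 * destination feeds one of the consumer's sources reports 4; every other
 * pair reports the default of 1.
 */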

/** Recursive computation of the delay member of a node. */
static void
compute_delay(struct schedule_node *n)
{
        if (!n->child_count) {
                /* The color read needs to be scheduled late, to avoid locking
                 * the scoreboard early.  This is our best tool for
                 * encouraging that.  The other scoreboard locking ops will
                 * have this happen by default, since they are generally the
                 * DAG heads or close to them.
                 */
                if (n->inst->op == QOP_TLB_COLOR_READ)
                        n->delay = 1000;
                else
                        n->delay = 1;
        } else {
                for (int i = 0; i < n->child_count; i++) {
                        if (!n->children[i]->delay)
                                compute_delay(n->children[i]);
                        n->delay = MAX2(n->delay,
                                        n->children[i]->delay +
                                        latency_between(n->children[i], n));
                }
        }
}
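
/* Worked example of the recursion above (hypothetical chain): a leaf
 * QOP_TLB_COLOR_READ gets delay 1000 and a leaf ALU op gets delay 1; a node
 * with those two as children, at latency 1 to each, ends up with delay
 * MAX2(1 + 1, 1000 + 1) = 1001, so the whole chain feeding the color read
 * wins choose_instruction()'s depth tie-break.
 */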

static void
schedule_instructions(struct vc4_compile *c,
                      struct qblock *block, struct schedule_state *state)
{
        if (debug) {
                fprintf(stderr, "initial deps:\n");
                dump_state(c, state);
        }

        /* Remove non-DAG heads from the list. */
        list_for_each_entry_safe(struct schedule_node, n,
                                 &state->worklist, link) {
                if (n->parent_count != 0)
                        list_del(&n->link);
        }

        state->time = 0;
        while (!list_empty(&state->worklist)) {
                struct schedule_node *chosen = choose_instruction(state);
                struct qinst *inst = chosen->inst;

                if (debug) {
                        fprintf(stderr, "current list:\n");
                        dump_state(c, state);
                        fprintf(stderr, "chose: ");
                        qir_dump_inst(c, inst);
                        fprintf(stderr, " (%d cost)\n",
                                get_register_pressure_cost(state, inst));
                }

                state->time = MAX2(state->time, chosen->unblocked_time);

                /* Schedule this instruction back onto the QIR list. */
                list_del(&chosen->link);
                list_add(&inst->link, &block->instructions);

                /* Now that we've scheduled a new instruction, some of its
                 * children can be promoted to the list of instructions ready to
                 * be scheduled.  Update the children's unblocked time for this
                 * DAG edge as we do so.
                 */
                for (int i = chosen->child_count - 1; i >= 0; i--) {
                        struct schedule_node *child = chosen->children[i];

                        child->unblocked_time = MAX2(child->unblocked_time,
                                                     state->time +
                                                     latency_between(child,
                                                                     chosen));
                        child->parent_count--;
                        if (child->parent_count == 0)
                                list_add(&child->link, &state->worklist);
                }

                /* Update our tracking of register pressure. */
                for (int i = 0; i < qir_get_nsrc(inst); i++) {
                        if (inst->src[i].file == QFILE_TEMP)
                                BITSET_SET(state->temp_live, inst->src[i].index);
                }
                if (inst->dst.file == QFILE_TEMP) {
                        state->temp_writes[inst->dst.index]--;
                        if (state->temp_writes[inst->dst.index] == 0)
                                BITSET_CLEAR(state->temp_live, inst->dst.index);
                }

                state->time++;
        }
}
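
/* A note on ordering (a reading of the loop above, not extra behavior):
 * scheduling walks bottom-up and each chosen instruction is list_add()ed at
 * the head of block->instructions, so the last node scheduled, which is the
 * earliest instruction in program order, ends up first and the block comes
 * out in normal forward order again.
 */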

static void
qir_schedule_instructions_block(struct vc4_compile *c,
                                struct qblock *block)
{
        void *mem_ctx = ralloc_context(NULL);
        struct schedule_state state = { { 0 } };

        state.temp_writes = rzalloc_array(mem_ctx, uint32_t, c->num_temps);
        state.temp_live = rzalloc_array(mem_ctx, BITSET_WORD,
                                        BITSET_WORDS(c->num_temps));
        list_inithead(&state.worklist);

        /* Wrap each instruction in a scheduler structure. */
        qir_for_each_inst_safe(inst, block) {
                struct schedule_node *n = rzalloc(mem_ctx, struct schedule_node);

                n->inst = inst;
                list_del(&inst->link);
                list_addtail(&n->link, &state.worklist);

                if (inst->dst.file == QFILE_TEMP)
                        state.temp_writes[inst->dst.index]++;
        }

        /* Dependencies tracked top-to-bottom. */
        calculate_forward_deps(c, mem_ctx, &state.worklist);
        /* Dependencies tracked bottom-to-top. */
        calculate_reverse_deps(c, mem_ctx, &state.worklist);

        list_for_each_entry(struct schedule_node, n, &state.worklist, link)
                compute_delay(n);

        schedule_instructions(c, block, &state);

        ralloc_free(mem_ctx);
}

void
qir_schedule_instructions(struct vc4_compile *c)
{
        if (debug) {
                fprintf(stderr, "Pre-schedule instructions\n");
                qir_dump(c);
        }

        qir_for_each_block(block, c)
                qir_schedule_instructions_block(c, block);

        if (debug) {
                fprintf(stderr, "Post-schedule instructions\n");
                qir_dump(c);
        }
}
    755