// codegen: nv50 IR register allocation
      1 /*
      2  * Copyright 2011 Christoph Bumiller
      3  *
      4  * Permission is hereby granted, free of charge, to any person obtaining a
      5  * copy of this software and associated documentation files (the "Software"),
      6  * to deal in the Software without restriction, including without limitation
      7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8  * and/or sell copies of the Software, and to permit persons to whom the
      9  * Software is furnished to do so, subject to the following conditions:
     10  *
     11  * The above copyright notice and this permission notice shall be included in
     12  * all copies or substantial portions of the Software.
     13  *
     14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20  * OTHER DEALINGS IN THE SOFTWARE.
     21  */
     22 
     23 #include "codegen/nv50_ir.h"
     24 #include "codegen/nv50_ir_target.h"
     25 
     26 #include <algorithm>
     27 #include <stack>
     28 #include <limits>
     29 #if __cplusplus >= 201103L
     30 #include <unordered_map>
     31 #else
     32 #include <tr1/unordered_map>
     33 #endif
     34 
     35 namespace nv50_ir {
     36 
     37 #if __cplusplus >= 201103L
     38 using std::hash;
     39 using std::unordered_map;
     40 #elif !defined(ANDROID)
     41 using std::tr1::hash;
     42 using std::tr1::unordered_map;
     43 #else
     44 #error Android release before Lollipop is not supported!
     45 #endif
     46 
#define MAX_REGISTER_FILE_SIZE 256

// Tracks which allocation units of each register file are occupied.
// Each file f has its own granularity: one bit in bits[f] represents
// 2^unit[f] bytes of register space.
class RegisterSet
{
public:
   RegisterSet(const Target *);

   void init(const Target *);
   // Clear the occupancy bits of file f; if resetMax is set, also forget
   // the high-water mark of assigned units (fill[f]).
   void reset(DataFile, bool resetMax = false);

   void periodicMask(DataFile f, uint32_t lock, uint32_t unlock);
   void intersect(DataFile f, const RegisterSet *);

   // Find a free range of `size` units in f; on success store its start in
   // reg and return true.  Note: does NOT mark the range as occupied.
   bool assign(int32_t& reg, DataFile f, unsigned int size);
   void release(DataFile f, int32_t reg, unsigned int size);
   void occupy(DataFile f, int32_t reg, unsigned int size);
   void occupy(const Value *);
   void occupyMask(DataFile f, int32_t reg, uint8_t mask);
   bool isOccupied(DataFile f, int32_t reg, unsigned int size) const;
   bool testOccupy(const Value *);
   bool testOccupy(DataFile f, int32_t reg, unsigned int size);

   // Highest unit ever occupied in f (-1 if none), i.e. register demand.
   inline int getMaxAssigned(DataFile f) const { return fill[f]; }

   inline unsigned int getFileSize(DataFile f, uint8_t regSize) const
   {
      // pre-nvc0 short encodings can only reach half the GPR range with
      // 16-bit registers
      if (restrictedGPR16Range && f == FILE_GPR && regSize == 2)
         return (last[f] + 1) / 2;
      return last[f] + 1;
   }

   // Convert a byte size into a count of allocation units of file f.
   inline unsigned int units(DataFile f, unsigned int size) const
   {
      return size >> unit[f];
   }
   // for regs of size >= 4, id is counted in 4-byte words (like nv50/c0 binary)
   inline unsigned int idToBytes(const Value *v) const
   {
      return v->reg.data.id * MIN2(v->reg.size, 4);
   }
   inline unsigned int idToUnits(const Value *v) const
   {
      return units(v->reg.file, idToBytes(v));
   }
   inline int bytesToId(Value *v, unsigned int bytes) const
   {
      if (v->reg.size < 4)
         return units(v->reg.file, bytes);
      return bytes / 4;
   }
   inline int unitsToId(DataFile f, int u, uint8_t size) const
   {
      if (u < 0)
         return -1;
      return (size < 4) ? u : ((u << unit[f]) / 4);
   }

   void print(DataFile f) const;

   // True on chipsets before nvc0, where some encodings restrict which
   // GPRs can be addressed (see getFileSize above).
   const bool restrictedGPR16Range;

private:
   BitSet bits[LAST_REGISTER_FILE + 1]; // occupancy, one bit per unit

   int unit[LAST_REGISTER_FILE + 1]; // log2 of allocation granularity

   int last[LAST_REGISTER_FILE + 1]; // highest valid unit index per file
   int fill[LAST_REGISTER_FILE + 1]; // high-water mark of occupied units
};
    116 
    117 void
    118 RegisterSet::reset(DataFile f, bool resetMax)
    119 {
    120    bits[f].fill(0);
    121    if (resetMax)
    122       fill[f] = -1;
    123 }
    124 
    125 void
    126 RegisterSet::init(const Target *targ)
    127 {
    128    for (unsigned int rf = 0; rf <= FILE_ADDRESS; ++rf) {
    129       DataFile f = static_cast<DataFile>(rf);
    130       last[rf] = targ->getFileSize(f) - 1;
    131       unit[rf] = targ->getFileUnit(f);
    132       fill[rf] = -1;
    133       assert(last[rf] < MAX_REGISTER_FILE_SIZE);
    134       bits[rf].allocate(last[rf] + 1, true);
    135    }
    136 }
    137 
    138 RegisterSet::RegisterSet(const Target *targ)
    139   : restrictedGPR16Range(targ->getChipset() < 0xc0)
    140 {
    141    init(targ);
    142    for (unsigned int i = 0; i <= LAST_REGISTER_FILE; ++i)
    143       reset(static_cast<DataFile>(i));
    144 }
    145 
// Lock/unlock occupancy bits in a pattern repeating every 32 units
// (forwarded to BitSet::periodicMask32).
void
RegisterSet::periodicMask(DataFile f, uint32_t lock, uint32_t unlock)
{
   bits[f].periodicMask32(lock, unlock);
}
    151 
// Intersect the *free* registers of this set with those of `set`: a unit
// occupied in either set becomes occupied here (hence the OR on the
// occupancy bits).
void
RegisterSet::intersect(DataFile f, const RegisterSet *set)
{
   bits[f] |= set->bits[f];
}
    157 
// Debug dump of the occupancy bitset of file f.
// NOTE(review): the "GPR:" label is printed regardless of which file f
// actually refers to.
void
RegisterSet::print(DataFile f) const
{
   INFO("GPR:");
   bits[f].print();
   INFO("\n");
}
    165 
    166 bool
    167 RegisterSet::assign(int32_t& reg, DataFile f, unsigned int size)
    168 {
    169    reg = bits[f].findFreeRange(size);
    170    if (reg < 0)
    171       return false;
    172    fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
    173    return true;
    174 }
    175 
// True if any unit in [reg, reg + size) of file f is already occupied.
bool
RegisterSet::isOccupied(DataFile f, int32_t reg, unsigned int size) const
{
   return bits[f].testRange(reg, size);
}
    181 
// Mark the fixed register of v as occupied, converting its register id and
// size into allocation units of its file.
void
RegisterSet::occupy(const Value *v)
{
   occupy(v->reg.file, idToUnits(v), v->reg.size >> unit[v->reg.file]);
}
    187 
// Occupy the units selected by mask, anchored at the 32-unit aligned word
// containing reg and shifted to reg's position within that word.
void
RegisterSet::occupyMask(DataFile f, int32_t reg, uint8_t mask)
{
   bits[f].setMask(reg & ~31, static_cast<uint32_t>(mask) << (reg % 32));
}
    193 
    194 void
    195 RegisterSet::occupy(DataFile f, int32_t reg, unsigned int size)
    196 {
    197    bits[f].setRange(reg, size);
    198 
    199    INFO_DBG(0, REG_ALLOC, "reg occupy: %u[%i] %u\n", f, reg, size);
    200 
    201    fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
    202 }
    203 
// Try to occupy the fixed register of v; returns false (without modifying
// the set) if any part of it is already taken.
bool
RegisterSet::testOccupy(const Value *v)
{
   return testOccupy(v->reg.file,
                     idToUnits(v), v->reg.size >> unit[v->reg.file]);
}
    210 
    211 bool
    212 RegisterSet::testOccupy(DataFile f, int32_t reg, unsigned int size)
    213 {
    214    if (isOccupied(f, reg, size))
    215       return false;
    216    occupy(f, reg, size);
    217    return true;
    218 }
    219 
// Free the units [reg, reg + size) of file f.  Note: the high-water mark
// (fill) is not lowered here.
void
RegisterSet::release(DataFile f, int32_t reg, unsigned int size)
{
   bits[f].clrRange(reg, size);

   INFO_DBG(0, REG_ALLOC, "reg release: %u[%i] %u\n", f, reg, size);
}
    227 
// Top-level register allocation driver: owns the preparation passes
// (phi/argument moves, liveness, interval construction, constraints).
class RegAlloc
{
public:
   RegAlloc(Program *program) : prog(program), sequence(0) { }

   bool exec();
   bool execFunc();

private:
   // Inserts MOVs for phi sources in predecessor blocks and splits
   // critical edges (see PhiMovesPass::visit / splitEdges below).
   class PhiMovesPass : public Pass {
   private:
      virtual bool visit(BasicBlock *);
      inline bool needNewElseBlock(BasicBlock *b, BasicBlock *p);
      inline void splitEdges(BasicBlock *b);
   };

   // Binds OP_CALL inputs/outputs to the fixed registers the callee uses
   // (see ArgumentMovesPass::visit below).
   class ArgumentMovesPass : public Pass {
   private:
      virtual bool visit(BasicBlock *);
   };

   // Computes live ranges (Value::livei) from per-block live sets.
   class BuildIntervalsPass : public Pass {
   private:
      virtual bool visit(BasicBlock *);
      void collectLiveValues(BasicBlock *);
      void addLiveRange(Value *, const BasicBlock *, int end);
   };

   // Inserts moves/constraints for instructions whose operands must be
   // arranged specially (e.g. texture ops — see texConstraint* below).
   class InsertConstraintsPass : public Pass {
   public:
      bool exec(Function *func);
   private:
      virtual bool visit(BasicBlock *);

      bool insertConstraintMoves();

      void condenseDefs(Instruction *);
      void condenseSrcs(Instruction *, const int first, const int last);

      void addHazard(Instruction *i, const ValueRef *src);
      void textureMask(TexInstruction *);
      void addConstraint(Instruction *, int s, int n);
      bool detectConflict(Instruction *, int s);

      // target specific functions, TODO: put in subclass or Target
      void texConstraintNV50(TexInstruction *);
      void texConstraintNVC0(TexInstruction *);
      void texConstraintNVE0(TexInstruction *);
      void texConstraintGM107(TexInstruction *);

      std::list<Instruction *> constrList; // constrained insns found by visit()

      const Target *targ;
   };

   // Computes the live-in set of a basic block (recursive over the CFG).
   bool buildLiveSets(BasicBlock *);

private:
   Program *prog;
   Function *func;

   // instructions in control flow / chronological order
   ArrayList insns;

   int sequence; // for manual passes through CFG
};
    294 
// Pair of related values; used for the lists of values that must be spilled
// (see SpillCodeInserter::run and GCRA::mustSpill).
typedef std::pair<Value *, Value *> ValuePair;
    296 
// Inserts spill (store after def) and unspill (load before use) code for
// values that could not be assigned a register, managing the stack slots
// they live in.
class SpillCodeInserter
{
public:
   SpillCodeInserter(Function *fn) : func(fn), stackSize(0), stackBase(0) { }

   // Process the list of values that must be spilled.
   bool run(const std::list<ValuePair>&);

   Symbol *assignSlot(const Interval&, const unsigned int size);
   Value *offsetSlot(Value *, const LValue *);
   inline int32_t getStackSize() const { return stackSize; }

private:
   Function *func;

   // A stack slot together with the interval during which it is in use.
   struct SpillSlot
   {
      Interval occup;
      std::list<Value *> residents; // needed to recalculate occup
      Symbol *sym;
      int32_t offset;
      inline uint8_t size() const { return sym->reg.size; }
   };
   std::list<SpillSlot> slots;
   int32_t stackSize; // total bytes of spill stack used so far
   int32_t stackBase;

   // Load the spilled value from slot before its use in usei.
   LValue *unspill(Instruction *usei, LValue *, Value *slot);
   // Store the value to slot after its definition in defi.
   void spill(Instruction *defi, Value *slot, LValue *);
};
    326 
// Extend val's live interval to [begin, end): begin is the serial of its
// defining instruction when that lies inside bb, otherwise bb's entry
// (i.e. val is live-in to the block).
void
RegAlloc::BuildIntervalsPass::addLiveRange(Value *val,
                                           const BasicBlock *bb,
                                           int end)
{
   Instruction *insn = val->getUniqueInsn();

   if (!insn)
      insn = bb->getFirst();

   assert(bb->getFirst()->serial <= bb->getExit()->serial);
   assert(bb->getExit()->serial + 1 >= end);

   int begin = insn->serial;
   // defined outside this block -> live from the block's entry
   if (begin < bb->getEntry()->serial || begin > bb->getExit()->serial)
      begin = bb->getEntry()->serial;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "%%%i <- live range [%i(%i), %i)\n",
            val->id, begin, insn->serial, end);

   if (begin != end) // empty ranges are only added as hazards for fixed regs
      val->livei.extend(begin, end);
}
    350 
    351 bool
    352 RegAlloc::PhiMovesPass::needNewElseBlock(BasicBlock *b, BasicBlock *p)
    353 {
    354    if (b->cfg.incidentCount() <= 1)
    355       return false;
    356 
    357    int n = 0;
    358    for (Graph::EdgeIterator ei = p->cfg.outgoing(); !ei.end(); ei.next())
    359       if (ei.getType() == Graph::Edge::TREE ||
    360           ei.getType() == Graph::Edge::FORWARD)
    361          ++n;
    362    return (n == 2);
    363 }
    364 
// Hash functor for (phi instruction, predecessor block) keys.
struct PhiMapHash {
   size_t operator()(const std::pair<Instruction *, BasicBlock *>& val) const {
      // standard multiply-and-add combination of the two pointer hashes
      return hash<Instruction*>()(val.first) * 31 +
         hash<BasicBlock*>()(val.second);
   }
};

// Maps (phi, incoming block) -> the phi source value arriving over that edge.
typedef unordered_map<
   std::pair<Instruction *, BasicBlock *>, Value *, PhiMapHash> PhiMap;
    374 
// Critical edges need to be split up so that work can be inserted along
// specific edge transitions. Unfortunately manipulating incident edges into a
// BB invalidates all the PHI nodes since their sources are implicitly ordered
// by incident edge order.
//
// TODO: Make it so that that is not the case, and PHI nodes store pointers to
// the original BBs.
void
RegAlloc::PhiMovesPass::splitEdges(BasicBlock *bb)
{
   BasicBlock *pb, *pn;
   Instruction *phi;
   Graph::EdgeIterator ei;
   std::stack<BasicBlock *> stack;
   int j = 0;

   // Collect predecessors that reach bb over a critical edge.
   for (ei = bb->cfg.incident(); !ei.end(); ei.next()) {
      pb = BasicBlock::get(ei.getNode());
      assert(pb);
      if (needNewElseBlock(bb, pb))
         stack.push(pb);
   }

   // No critical edges were found, no need to perform any work.
   if (stack.empty())
      return;

   // We're about to, potentially, reorder the inbound edges. This means that
   // we need to hold on to the (phi, bb) -> src mapping, and fix up the phi
   // nodes after the graph has been modified.
   PhiMap phis;

   // Record the current source of every phi for every predecessor, keyed by
   // the predecessor (j-th incident edge -> j-th phi source).
   j = 0;
   for (ei = bb->cfg.incident(); !ei.end(); ei.next(), j++) {
      pb = BasicBlock::get(ei.getNode());
      for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next)
         phis.insert(std::make_pair(std::make_pair(phi, pb), phi->getSrc(j)));
   }

   // Split each critical edge pb -> bb by routing it through a new block pn.
   while (!stack.empty()) {
      pb = stack.top();
      pn = new BasicBlock(func);
      stack.pop();

      pb->cfg.detach(&bb->cfg);
      pb->cfg.attach(&pn->cfg, Graph::Edge::TREE);
      pn->cfg.attach(&bb->cfg, Graph::Edge::FORWARD);

      // Retarget pb's exit branch if it jumped directly to bb.
      assert(pb->getExit()->op != OP_CALL);
      if (pb->getExit()->asFlow()->target.bb == bb)
         pb->getExit()->asFlow()->target.bb = pn;

      // The phi sources that used to come from pb now come from pn.
      for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
         PhiMap::iterator it = phis.find(std::make_pair(phi, pb));
         assert(it != phis.end());
         phis.insert(std::make_pair(std::make_pair(phi, pn), it->second));
         phis.erase(it);
      }
   }

   // Now go through and fix up all of the phi node sources.
   j = 0;
   for (ei = bb->cfg.incident(); !ei.end(); ei.next(), j++) {
      pb = BasicBlock::get(ei.getNode());
      for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
         PhiMap::const_iterator it = phis.find(std::make_pair(phi, pb));
         assert(it != phis.end());

         phi->setSrc(j, it->second);
      }
   }
}
    447 
    448 // For each operand of each PHI in b, generate a new value by inserting a MOV
    449 // at the end of the block it is coming from and replace the operand with its
    450 // result. This eliminates liveness conflicts and enables us to let values be
    451 // copied to the right register if such a conflict exists nonetheless.
    452 //
    453 // These MOVs are also crucial in making sure the live intervals of phi srces
    454 // are extended until the end of the loop, since they are not included in the
    455 // live-in sets.
    456 bool
    457 RegAlloc::PhiMovesPass::visit(BasicBlock *bb)
    458 {
    459    Instruction *phi, *mov;
    460 
    461    splitEdges(bb);
    462 
    463    // insert MOVs (phi->src(j) should stem from j-th in-BB)
    464    int j = 0;
    465    for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
    466       BasicBlock *pb = BasicBlock::get(ei.getNode());
    467       if (!pb->isTerminated())
    468          pb->insertTail(new_FlowInstruction(func, OP_BRA, bb));
    469 
    470       for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
    471          LValue *tmp = new_LValue(func, phi->getDef(0)->asLValue());
    472          mov = new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
    473 
    474          mov->setSrc(0, phi->getSrc(j));
    475          mov->setDef(0, tmp);
    476          phi->setSrc(j, tmp);
    477 
    478          pb->insertBefore(pb->getExit(), mov);
    479       }
    480       ++j;
    481    }
    482 
    483    return true;
    484 }
    485 
bool
RegAlloc::ArgumentMovesPass::visit(BasicBlock *bb)
{
   // Bind function call inputs/outputs to the same physical register
   // the callee uses, inserting moves as appropriate for the case a
   // conflict arises.
   for (Instruction *i = bb->getEntry(); i; i = i->next) {
      FlowInstruction *cal = i->asFlow();
      // TODO: Handle indirect calls.
      // Right now they should only be generated for builtins.
      if (!cal || cal->op != OP_CALL || cal->builtin || cal->indirect)
         continue;
      RegisterSet clobberSet(prog->getTarget());

      // Bind input values.
      // Each input gets a MOV into a temporary fixed to the register id of
      // the callee's corresponding formal input.
      for (int s = cal->indirect ? 1 : 0; cal->srcExists(s); ++s) {
         const int t = cal->indirect ? (s - 1) : s;
         LValue *tmp = new_LValue(func, cal->getSrc(s)->asLValue());
         tmp->reg.data.id = cal->target.fn->ins[t].rep()->reg.data.id;

         Instruction *mov =
            new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
         mov->setDef(0, tmp);
         mov->setSrc(0, cal->getSrc(s));
         cal->setSrc(s, tmp);

         bb->insertBefore(cal, mov);
      }

      // Bind output values.
      // Symmetrically, each output is produced into a fixed-register
      // temporary and moved to the original def after the call.
      for (int d = 0; cal->defExists(d); ++d) {
         LValue *tmp = new_LValue(func, cal->getDef(d)->asLValue());
         tmp->reg.data.id = cal->target.fn->outs[d].rep()->reg.data.id;

         Instruction *mov =
            new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
         mov->setSrc(0, tmp);
         mov->setDef(0, cal->getDef(d));
         cal->setDef(d, tmp);

         bb->insertAfter(cal, mov);
         clobberSet.occupy(tmp);
      }

      // Bind clobbered values.
      // Callee-clobbered registers not already covered by an output above
      // (testOccupy succeeds only then) get an extra dummy def on the call.
      for (std::deque<Value *>::iterator it = cal->target.fn->clobbers.begin();
           it != cal->target.fn->clobbers.end();
           ++it) {
         if (clobberSet.testOccupy(*it)) {
            Value *tmp = new_LValue(func, (*it)->asLValue());
            tmp->reg.data.id = (*it)->reg.data.id;
            cal->setDef(cal->defCount(), tmp);
         }
      }
   }

   // Update the clobber set of the function.
   if (BasicBlock::get(func->cfgExit) == bb) {
      func->buildDefSets();
      for (unsigned int i = 0; i < bb->defSet.getSize(); ++i)
         if (bb->defSet.test(i))
            func->clobbers.push_back(func->getLValue(i));
   }

   return true;
}
    552 
// Build the set of live-in variables of bb.
// Successors are visited first (recursive DFS guarded by cfg.visit(sequence));
// bb's live-in set is then derived from the union of its successors' live-in
// sets by walking bb's instructions backwards.
bool
RegAlloc::buildLiveSets(BasicBlock *bb)
{
   Function *f = bb->getFunction();
   BasicBlock *bn;
   Instruction *i;
   unsigned int s, d;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "buildLiveSets(BB:%i)\n", bb->getId());

   bb->liveSet.allocate(func->allLValues.getSize(), false);

   // Union of the live-in sets of all successors.
   int n = 0;
   for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
      bn = BasicBlock::get(ei.getNode());
      if (bn == bb)
         continue;
      if (bn->cfg.visit(sequence))
         if (!buildLiveSets(bn))
            return false;
      // marker set means liveSet already holds results that must be merged
      // rather than overwritten
      if (n++ || bb->liveSet.marker)
         bb->liveSet |= bn->liveSet;
      else
         bb->liveSet = bn->liveSet;
   }
   if (!n && !bb->liveSet.marker)
      bb->liveSet.fill(0);
   bb->liveSet.marker = true;

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("BB:%i live set of out blocks:\n", bb->getId());
      bb->liveSet.print();
   }

   // if (!bb->getEntry())
   //   return true;

   // Function outputs are live at the exit block.
   if (bb == BasicBlock::get(f->cfgExit)) {
      for (std::deque<ValueRef>::iterator it = f->outs.begin();
           it != f->outs.end(); ++it) {
         assert(it->get()->asLValue());
         bb->liveSet.set(it->get()->id);
      }
   }

   // Walk instructions backwards: defs kill liveness, uses create it.
   for (i = bb->getExit(); i && i != bb->getEntry()->prev; i = i->prev) {
      for (d = 0; i->defExists(d); ++d)
         bb->liveSet.clr(i->getDef(d)->id);
      for (s = 0; i->srcExists(s); ++s)
         if (i->getSrc(s)->asLValue())
            bb->liveSet.set(i->getSrc(s)->id);
   }
   // phi defs are not live-in to the block
   for (i = bb->getPhi(); i && i->op == OP_PHI; i = i->next)
      bb->liveSet.clr(i->getDef(0)->id);

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("BB:%i live set after propagation:\n", bb->getId());
      bb->liveSet.print();
   }

   return true;
}
    616 
// Set bb's liveSet to the union of its (non-DUMMY) successors' live sets,
// or clear it when bb has predecessors but no successors.
void
RegAlloc::BuildIntervalsPass::collectLiveValues(BasicBlock *bb)
{
   // bbA/bbB hold up to two sets whose OR has not been folded into
   // bb->liveSet yet.
   BasicBlock *bbA = NULL, *bbB = NULL;

   if (bb->cfg.outgoingCount()) {
      // trickery to save a loop of OR'ing liveSets
      // aliasing works fine with BitSet::setOr
      for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
         if (ei.getType() == Graph::Edge::DUMMY)
            continue;
         if (bbA) {
            // two sets pending: fold them into bb's liveSet, which then
            // becomes one of the pending operands itself
            bb->liveSet.setOr(&bbA->liveSet, &bbB->liveSet);
            bbA = bb;
         } else {
            bbA = bbB;
         }
         bbB = BasicBlock::get(ei.getNode());
      }
      // fold in the last (or only) pending set(s)
      bb->liveSet.setOr(&bbB->liveSet, bbA ? &bbA->liveSet : NULL);
   } else
   if (bb->cfg.incidentCount()) {
      bb->liveSet.fill(0);
   }
}
    642 
// Build live intervals for all values live within bb, starting from the
// block's live-out set and walking the instructions bottom-up.
bool
RegAlloc::BuildIntervalsPass::visit(BasicBlock *bb)
{
   collectLiveValues(bb);

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "BuildIntervals(BB:%i)\n", bb->getId());

   // go through out blocks and delete phi sources that do not originate from
   // the current block from the live set
   for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
      BasicBlock *out = BasicBlock::get(ei.getNode());

      for (Instruction *i = out->getPhi(); i && i->op == OP_PHI; i = i->next) {
         bb->liveSet.clr(i->getDef(0)->id);

         for (int s = 0; i->srcExists(s); ++s) {
            assert(i->src(s).getInsn());
            if (i->getSrc(s)->getUniqueInsn()->bb == bb) // XXX: reachableBy ?
               bb->liveSet.set(i->getSrc(s)->id);
            else
               bb->liveSet.clr(i->getSrc(s)->id);
         }
      }
   }

   // remaining live-outs are live until end
   if (bb->getExit()) {
      for (unsigned int j = 0; j < bb->liveSet.getSize(); ++j)
         if (bb->liveSet.test(j))
            addLiveRange(func->getLValue(j), bb, bb->getExit()->serial + 1);
   }

   // Bottom-up walk: a def ends a value's liveness; a use of a value not yet
   // in the live set starts a range ending at this instruction.
   for (Instruction *i = bb->getExit(); i && i->op != OP_PHI; i = i->prev) {
      for (int d = 0; i->defExists(d); ++d) {
         bb->liveSet.clr(i->getDef(d)->id);
         if (i->getDef(d)->reg.data.id >= 0) // add hazard for fixed regs
            i->getDef(d)->livei.extend(i->serial, i->serial);
      }

      for (int s = 0; i->srcExists(s); ++s) {
         if (!i->getSrc(s)->asLValue())
            continue;
         if (!bb->liveSet.test(i->getSrc(s)->id)) {
            bb->liveSet.set(i->getSrc(s)->id);
            addLiveRange(i->getSrc(s), bb, i->serial);
         }
      }
   }

   // Function inputs with fixed registers get a hazard range at the start.
   if (bb == BasicBlock::get(func->cfg.getRoot())) {
      for (std::deque<ValueDef>::iterator it = func->ins.begin();
           it != func->ins.end(); ++it) {
         if (it->get()->reg.data.id >= 0) // add hazard for fixed regs
            it->get()->livei.extend(0, 1);
      }
   }

   return true;
}
    702 
    703 
// Categories of copy-related values considered for coalescing
// (used as the mask argument of GCRA::doCoalesce).
#define JOIN_MASK_PHI        (1 << 0)
#define JOIN_MASK_UNION      (1 << 1)
#define JOIN_MASK_MOV        (1 << 2)
#define JOIN_MASK_TEX        (1 << 3)

// Graph-colouring register allocator: builds a register interference graph
// (RIG) over all values, coalesces copy-related values, simplifies and
// colours the graph; values that cannot be coloured go into mustSpill for
// the SpillCodeInserter.
class GCRA
{
public:
   GCRA(Function *, SpillCodeInserter&);
   ~GCRA();

   bool allocateRegisters(ArrayList& insns);

   void printNodeInfo() const;

private:
   // One node of the interference graph per (coalesced) value.
   class RIG_Node : public Graph::Node
   {
   public:
      RIG_Node();

      void init(const RegisterSet&, LValue *);

      void addInterference(RIG_Node *);
      void addRegPreference(RIG_Node *);

      inline LValue *getValue() const
      {
         return reinterpret_cast<LValue *>(data);
      }
      inline void setValue(LValue *lval) { data = lval; }

      // mask of the `colors` units this node covers, positioned within an
      // 8-unit group according to its assigned register
      inline uint8_t getCompMask() const
      {
         return ((1 << colors) - 1) << (reg & 7);
      }

      static inline RIG_Node *get(const Graph::EdgeIterator& ei)
      {
         return static_cast<RIG_Node *>(ei.getNode());
      }

   public:
      uint32_t degree;
      uint16_t degreeLimit; // if deg < degLimit, node is trivially colourable
      uint16_t colors;      // number of allocation units the value needs

      DataFile f;
      int32_t reg;          // assigned register unit, -1 if none

      float weight;         // spill weight

      // list pointers for simplify() phase
      RIG_Node *next;
      RIG_Node *prev;

      // union of the live intervals of all coalesced values (we want to retain
      //  the separate intervals for testing interference of compound values)
      Interval livei;

      std::list<RIG_Node *> prefRegs; // nodes we would like to share a reg with
   };

private:
   inline RIG_Node *getNode(const LValue *v) const { return &nodes[v->id]; }

   void buildRIG(ArrayList&);
   bool coalesce(ArrayList&);
   bool doCoalesce(ArrayList&, unsigned int mask); // mask: JOIN_MASK_* bits
   void calculateSpillWeights();
   bool simplify();
   bool selectRegisters();
   void cleanup(const bool success);

   void simplifyEdge(RIG_Node *, RIG_Node *);
   void simplifyNode(RIG_Node *);

   bool coalesceValues(Value *, Value *, bool force);
   void resolveSplitsAndMerges();
   void makeCompound(Instruction *, bool isSplit);

   inline void checkInterference(const RIG_Node *, Graph::EdgeIterator&);

   inline void insertOrderedTail(std::list<RIG_Node *>&, RIG_Node *);
   void checkList(std::list<RIG_Node *>&);

private:
   std::stack<uint32_t> stack; // node/value ids, in simplification order

   // list headers for simplify() phase
   RIG_Node lo[2];
   RIG_Node hi;

   Graph RIG;
   RIG_Node *nodes;        // one node per LValue, indexed by value id
   unsigned int nodeCount;

   Function *func;
   Program *prog;

   static uint8_t relDegree[17][17];

   RegisterSet regs;

   // need to fixup register id for participants of OP_MERGE/SPLIT
   std::list<Instruction *> merges;
   std::list<Instruction *> splits;

   SpillCodeInserter& spill;
   std::list<ValuePair> mustSpill; // values the colouring failed for
};
    815 
// relDegree[x][y]: degree contribution to a node of size x from a neighbour
// of size y (used in RIG_Node::init).  NOTE(review): zero-initialized here;
// presumably filled in before use — confirm against allocateRegisters().
uint8_t GCRA::relDegree[17][17];
    817 
    818 GCRA::RIG_Node::RIG_Node() : Node(NULL), next(this), prev(this)
    819 {
    820    colors = 0;
    821 }
    822 
// Debug dump of every RIG node: file/register, colors, weight, degree and
// the ids of all values it interferes with.
void
GCRA::printNodeInfo() const
{
   for (unsigned int i = 0; i < nodeCount; ++i) {
      if (!nodes[i].colors)
         continue;
      INFO("RIG_Node[%%%i]($[%u]%i): %u colors, weight %f, deg %u/%u\n X",
           i,
           nodes[i].f,nodes[i].reg,nodes[i].colors,
           nodes[i].weight,
           nodes[i].degree, nodes[i].degreeLimit);

      // neighbours in the interference graph (both edge directions)
      for (Graph::EdgeIterator ei = nodes[i].outgoing(); !ei.end(); ei.next())
         INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
      for (Graph::EdgeIterator ei = nodes[i].incident(); !ei.end(); ei.next())
         INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
      INFO("\n");
   }
}
    842 
    843 static bool
    844 isShortRegOp(Instruction *insn)
    845 {
    846    // Immediates are always in src1. Every other situation can be resolved by
    847    // using a long encoding.
    848    return insn->srcExists(1) && insn->src(1).getFile() == FILE_IMMEDIATE;
    849 }
    850 
    851 // Check if this LValue is ever used in an instruction that can't be encoded
    852 // with long registers (i.e. > r63)
    853 static bool
    854 isShortRegVal(LValue *lval)
    855 {
    856    if (lval->getInsn() == NULL)
    857       return false;
    858    for (Value::DefCIterator def = lval->defs.begin();
    859         def != lval->defs.end(); ++def)
    860       if (isShortRegOp((*def)->getInsn()))
    861          return true;
    862    for (Value::UseCIterator use = lval->uses.begin();
    863         use != lval->uses.end(); ++use)
    864       if (isShortRegOp((*use)->getInsn()))
    865          return true;
    866    return false;
    867 }
    868 
// Initialize the interference-graph node for lval: how many allocation
// units it needs (colors), its optional fixed register, spill weight and
// degree limit.
void
GCRA::RIG_Node::init(const RegisterSet& regs, LValue *lval)
{
   setValue(lval);
   // values pre-assigned a register must stay there and cannot be spilled
   if (lval->reg.data.id >= 0)
      lval->noSpill = lval->fixedReg = 1;

   colors = regs.units(lval->reg.file, lval->reg.size);
   f = lval->reg.file;
   reg = -1;
   if (lval->reg.data.id >= 0)
      reg = regs.idToUnits(lval);

   // start with infinite spill weight (see calculateSpillWeights)
   weight = std::numeric_limits<float>::infinity();
   degree = 0;
   int size = regs.getFileSize(f, lval->reg.size);
   // On nv50, we lose a bit of gpr encoding when there's an embedded
   // immediate.
   if (regs.restrictedGPR16Range && f == FILE_GPR && isShortRegVal(lval))
      size /= 2;
   degreeLimit = size;
   degreeLimit -= relDegree[1][colors] - 1;

   livei.insert(lval->livei);
}
    894 
// Try to join dst and src into a single RIG node so they are assigned the
// same hardware register. With force == false this is best-effort and bails
// out on any conflict; with force == true the caller asserts the join is
// required (e.g. MERGE/SPLIT operands) and only warnings are emitted.
// Returns true if the values were (or already are) joined.
bool
GCRA::coalesceValues(Value *dst, Value *src, bool force)
{
   LValue *rep = dst->join->asLValue();
   LValue *val = src->join->asLValue();

   // If src is bound to a fixed register, prefer it as the representative
   // so the fixed assignment is what survives the join.
   if (!force && val->reg.data.id >= 0) {
      rep = src->join->asLValue();
      val = dst->join->asLValue();
   }
   RIG_Node *nRep = &nodes[rep->id];
   RIG_Node *nVal = &nodes[val->id];

   if (src->reg.file != dst->reg.file) {
      if (!force)
         return false;
      WARN("forced coalescing of values in different files !\n");
   }
   if (!force && dst->reg.size != src->reg.size)
      return false;

   // Both values pinned to (different) fixed registers cannot be joined.
   if ((rep->reg.data.id >= 0) && (rep->reg.data.id != val->reg.data.id)) {
      if (force) {
         if (val->reg.data.id >= 0)
            WARN("forced coalescing of values in different fixed regs !\n");
      } else {
         if (val->reg.data.id >= 0)
            return false;
         // make sure that there is no overlap with the fixed register of rep
         for (ArrayList::Iterator it = func->allLValues.iterator();
              !it.end(); it.next()) {
            Value *reg = reinterpret_cast<Value *>(it.get())->asLValue();
            assert(reg);
            if (reg->interfers(rep) && reg->livei.overlaps(nVal->livei))
               return false;
         }
      }
   }

   // Overlapping live ranges can only be merged when forced (the operands
   // of a MERGE/SPLIT legitimately live at the same time).
   if (!force && nRep->livei.overlaps(nVal->livei))
      return false;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "joining %%%i($%i) <- %%%i\n",
            rep->id, rep->reg.data.id, val->id);

   // set join pointer of all values joined with val
   for (Value::DefIterator def = val->defs.begin(); def != val->defs.end();
        ++def)
      (*def)->get()->join = rep;
   assert(rep->join == rep && val->join == rep);

   // add val's definitions to rep and extend the live interval of its RIG node
   rep->defs.insert(rep->defs.end(), val->defs.begin(), val->defs.end());
   nRep->livei.unify(nVal->livei);
   return true;
}
    951 
    952 bool
    953 GCRA::coalesce(ArrayList& insns)
    954 {
    955    bool ret = doCoalesce(insns, JOIN_MASK_PHI);
    956    if (!ret)
    957       return false;
    958    switch (func->getProgram()->getTarget()->getChipset() & ~0xf) {
    959    case 0x50:
    960    case 0x80:
    961    case 0x90:
    962    case 0xa0:
    963       ret = doCoalesce(insns, JOIN_MASK_UNION | JOIN_MASK_TEX);
    964       break;
    965    case 0xc0:
    966    case 0xd0:
    967    case 0xe0:
    968    case 0xf0:
    969    case 0x100:
    970    case 0x110:
    971    case 0x120:
    972    case 0x130:
    973       ret = doCoalesce(insns, JOIN_MASK_UNION);
    974       break;
    975    default:
    976       break;
    977    }
    978    if (!ret)
    979       return false;
    980    return doCoalesce(insns, JOIN_MASK_MOV);
    981 }
    982 
    983 static inline uint8_t makeCompMask(int compSize, int base, int size)
    984 {
    985    uint8_t m = ((1 << size) - 1) << base;
    986 
    987    switch (compSize) {
    988    case 1:
    989       return 0xff;
    990    case 2:
    991       m |= (m << 2);
    992       return (m << 4) | m;
    993    case 3:
    994    case 4:
    995       return (m << 4) | m;
    996    default:
    997       assert(compSize <= 8);
    998       return m;
    999    }
   1000 }
   1001 
   1002 // Used when coalescing moves. The non-compound value will become one, e.g.:
   1003 // mov b32 $r0 $r2            / merge b64 $r0d { $r0 $r1 }
   1004 // split b64 { $r0 $r1 } $r0d / mov b64 $r0d f64 $r2d
   1005 static inline void copyCompound(Value *dst, Value *src)
   1006 {
   1007    LValue *ldst = dst->asLValue();
   1008    LValue *lsrc = src->asLValue();
   1009 
   1010    if (ldst->compound && !lsrc->compound) {
   1011       LValue *swap = lsrc;
   1012       lsrc = ldst;
   1013       ldst = swap;
   1014    }
   1015 
   1016    ldst->compound = lsrc->compound;
   1017    ldst->compMask = lsrc->compMask;
   1018 }
   1019 
// Mark the operands of a MERGE (split == false) or the results of a SPLIT
// (split == true) as components of the compound value rep, assigning each
// one a component mask describing which register units it may occupy.
void
GCRA::makeCompound(Instruction *insn, bool split)
{
   // rep is the compound value: the SPLIT source or the MERGE destination.
   LValue *rep = (split ? insn->getSrc(0) : insn->getDef(0))->asLValue();

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("makeCompound(split = %i): ", split);
      insn->print();
   }

   const unsigned int size = getNode(rep)->colors;
   unsigned int base = 0;

   if (!rep->compound)
      rep->compMask = 0xff;
   rep->compound = 1;

   // Walk the components in order, each occupying the next `colors` units.
   for (int c = 0; split ? insn->defExists(c) : insn->srcExists(c); ++c) {
      LValue *val = (split ? insn->getDef(c) : insn->getSrc(c))->asLValue();

      val->compound = 1;
      if (!val->compMask)
         val->compMask = 0xff;
      // Restrict the component's mask by its position within rep.
      val->compMask &= makeCompMask(size, base, getNode(val)->colors);
      assert(val->compMask);

      INFO_DBG(prog->dbgFlags, REG_ALLOC, "compound: %%%i:%02x <- %%%i:%02x\n",
           rep->id, rep->compMask, val->id, val->compMask);

      base += getNode(val)->colors;
   }
   // All components together must cover the compound exactly.
   assert(base == size);
}
   1053 
// One coalescing pass over all instructions; `mask` selects which kinds of
// joins (phi / union / tex / mov) are performed in this pass.
// Returns false only if a mandatory phi-operand join fails.
bool
GCRA::doCoalesce(ArrayList& insns, unsigned int mask)
{
   int c, n;

   for (n = 0; n < insns.getSize(); ++n) {
      Instruction *i;
      Instruction *insn = reinterpret_cast<Instruction *>(insns.get(n));

      switch (insn->op) {
      case OP_PHI:
         if (!(mask & JOIN_MASK_PHI))
            break;
         // Phi defs and all their sources must share a register.
         for (c = 0; insn->srcExists(c); ++c)
            if (!coalesceValues(insn->getDef(0), insn->getSrc(c), false)) {
               // this is bad
               ERROR("failed to coalesce phi operands\n");
               return false;
            }
         break;
      case OP_UNION:
      case OP_MERGE:
         if (!(mask & JOIN_MASK_UNION))
            break;
         // Sources of a union/merge must live in the destination register.
         for (c = 0; insn->srcExists(c); ++c)
            coalesceValues(insn->getDef(0), insn->getSrc(c), true);
         if (insn->op == OP_MERGE) {
            merges.push_back(insn);
            // A merge with more than one source builds a compound value.
            if (insn->srcExists(1))
               makeCompound(insn, false);
         }
         break;
      case OP_SPLIT:
         if (!(mask & JOIN_MASK_UNION))
            break;
         splits.push_back(insn);
         // All split results share the (compound) source register.
         for (c = 0; insn->defExists(c); ++c)
            coalesceValues(insn->getSrc(0), insn->getDef(c), true);
         makeCompound(insn, true);
         break;
      case OP_MOV:
         if (!(mask & JOIN_MASK_MOV))
            break;
         i = NULL;
         if (!insn->getDef(0)->uses.empty())
            i = (*insn->getDef(0)->uses.begin())->getInsn();
         // if this is a constraint-move there will only be a single use
         if (i && i->op == OP_MERGE) // do we really still need this ?
            break;
         // Only join with the source if its defining instruction does not
         // constrain its defs (which would forbid free register choice).
         i = insn->getSrc(0)->getUniqueInsn();
         if (i && !i->constrainedDefs()) {
            if (coalesceValues(insn->getDef(0), insn->getSrc(0), false))
               copyCompound(insn->getSrc(0), insn->getDef(0));
         }
         break;
      case OP_TEX:
      case OP_TXB:
      case OP_TXL:
      case OP_TXF:
      case OP_TXQ:
      case OP_TXD:
      case OP_TXG:
      case OP_TXLQ:
      case OP_TEXCSAA:
      case OP_TEXPREP:
         if (!(mask & JOIN_MASK_TEX))
            break;
         // Tex sources and defs must coincide (skip the predicate source).
         for (c = 0; insn->srcExists(c) && c != insn->predSrc; ++c)
            coalesceValues(insn->getDef(c), insn->getSrc(c), true);
         break;
      default:
         break;
      }
   }
   return true;
}
   1130 
   1131 void
   1132 GCRA::RIG_Node::addInterference(RIG_Node *node)
   1133 {
   1134    this->degree += relDegree[node->colors][colors];
   1135    node->degree += relDegree[colors][node->colors];
   1136 
   1137    this->attach(node, Graph::Edge::CROSS);
   1138 }
   1139 
   1140 void
   1141 GCRA::RIG_Node::addRegPreference(RIG_Node *node)
   1142 {
   1143    prefRegs.push_back(node);
   1144 }
   1145 
   1146 GCRA::GCRA(Function *fn, SpillCodeInserter& spill) :
   1147    func(fn),
   1148    regs(fn->getProgram()->getTarget()),
   1149    spill(spill)
   1150 {
   1151    prog = func->getProgram();
   1152 
   1153    // initialize relative degrees array - i takes away from j
   1154    for (int i = 1; i <= 16; ++i)
   1155       for (int j = 1; j <= 16; ++j)
   1156          relDegree[i][j] = j * ((i + j - 1) / j);
   1157 }
   1158 
// nodes is allocated in allocateRegisters() and normally freed (and reset
// to NULL) by cleanup(); this handles early destruction.
// NOTE(review): the null check is redundant for delete[], but presumably
// guards against `nodes` never having been initialized — confirm it is
// set to NULL at declaration/construction.
GCRA::~GCRA()
{
   if (nodes)
      delete[] nodes;
}
   1164 
   1165 void
   1166 GCRA::checkList(std::list<RIG_Node *>& lst)
   1167 {
   1168    GCRA::RIG_Node *prev = NULL;
   1169 
   1170    for (std::list<RIG_Node *>::iterator it = lst.begin();
   1171         it != lst.end();
   1172         ++it) {
   1173       assert((*it)->getValue()->join == (*it)->getValue());
   1174       if (prev)
   1175          assert(prev->livei.begin() <= (*it)->livei.begin());
   1176       prev = *it;
   1177    }
   1178 }
   1179 
   1180 void
   1181 GCRA::insertOrderedTail(std::list<RIG_Node *>& list, RIG_Node *node)
   1182 {
   1183    if (node->livei.isEmpty())
   1184       return;
   1185    // only the intervals of joined values don't necessarily arrive in order
   1186    std::list<RIG_Node *>::iterator prev, it;
   1187    for (it = list.end(); it != list.begin(); it = prev) {
   1188       prev = it;
   1189       --prev;
   1190       if ((*prev)->livei.begin() <= node->livei.begin())
   1191          break;
   1192    }
   1193    list.insert(it, node);
   1194 }
   1195 
// Build the register interference graph with a linear sweep over all
// values ordered by live-range start: a value interferes with every
// currently-active value of the same register file whose range overlaps.
void
GCRA::buildRIG(ArrayList& insns)
{
   std::list<RIG_Node *> values, active;

   // Function inputs are live from the start; add them first.
   for (std::deque<ValueDef>::iterator it = func->ins.begin();
        it != func->ins.end(); ++it)
      insertOrderedTail(values, getNode(it->get()->asLValue()));

   // Add one node per join-leader definition, ordered by range start.
   for (int i = 0; i < insns.getSize(); ++i) {
      Instruction *insn = reinterpret_cast<Instruction *>(insns.get(i));
      for (int d = 0; insn->defExists(d); ++d)
         if (insn->getDef(d)->rep() == insn->getDef(d))
            insertOrderedTail(values, getNode(insn->getDef(d)->asLValue()));
   }
   checkList(values);

   while (!values.empty()) {
      RIG_Node *cur = values.front();

      // Expire active nodes whose range ended before cur starts; add an
      // interference edge for every remaining overlap in the same file.
      for (std::list<RIG_Node *>::iterator it = active.begin();
           it != active.end();) {
         RIG_Node *node = *it;

         if (node->livei.end() <= cur->livei.begin()) {
            it = active.erase(it);
         } else {
            if (node->f == cur->f && node->livei.overlaps(cur->livei))
               cur->addInterference(node);
            ++it;
         }
      }
      values.pop_front();
      active.push_back(cur);
   }
}
   1232 
// Compute a spill cost for every RIG node (reference density: refcount^2
// over live-range extent) and distribute nodes onto the worklists:
// lo[0]/lo[1] for trivially colourable nodes (<= 32 bit / larger values),
// hi for constrained ones.
void
GCRA::calculateSpillWeights()
{
   for (unsigned int i = 0; i < nodeCount; ++i) {
      RIG_Node *const n = &nodes[i];
      if (!nodes[i].colors || nodes[i].livei.isEmpty())
         continue;
      if (nodes[i].reg >= 0) {
         // update max reg
         regs.occupy(n->f, n->reg, n->colors);
         continue;
      }
      LValue *val = nodes[i].getValue();

      // Spillable values get a finite weight; noSpill ones keep infinity
      // (set in RIG_Node::init) so they are never picked as candidates.
      if (!val->noSpill) {
         int rc = 0;
         for (Value::DefIterator it = val->defs.begin();
              it != val->defs.end();
              ++it)
            rc += (*it)->get()->refCount();

         nodes[i].weight =
            (float)rc * (float)rc / (float)nodes[i].livei.extent();
      }

      if (nodes[i].degree < nodes[i].degreeLimit) {
         int l = 0;
         if (val->reg.size > 4)
            l = 1;
         DLLIST_ADDHEAD(&lo[l], &nodes[i]);
      } else {
         DLLIST_ADDHEAD(&hi, &nodes[i]);
      }
   }
   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
      printNodeInfo();
}
   1270 
// Called when node a is removed from the graph: its neighbour b loses the
// corresponding degree, and if that drops b below its degree limit, b
// becomes trivially colourable and moves from the hi to a lo worklist.
void
GCRA::simplifyEdge(RIG_Node *a, RIG_Node *b)
{
   // only consider moving b if it was constrained before (degree >= limit)
   bool move = b->degree >= b->degreeLimit;

   INFO_DBG(prog->dbgFlags, REG_ALLOC,
            "edge: (%%%i, deg %u/%u) >-< (%%%i, deg %u/%u)\n",
            a->getValue()->id, a->degree, a->degreeLimit,
            b->getValue()->id, b->degree, b->degreeLimit);

   b->degree -= relDegree[a->colors][b->colors];

   // ... and only if removing a actually brought it below the limit
   move = move && b->degree < b->degreeLimit;
   if (move && !DLLIST_EMPTY(b)) {
      // list 1 holds values larger than 32 bit (see calculateSpillWeights)
      int l = (b->getValue()->reg.size > 4) ? 1 : 0;
      DLLIST_DEL(b);
      DLLIST_ADDTAIL(&lo[l], b);
   }
}
   1290 
// Remove a node from the interference graph: update all neighbours'
// degrees, unlink it from its worklist, and push it on the select stack
// (registers are assigned in reverse removal order).
void
GCRA::simplifyNode(RIG_Node *node)
{
   for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
      simplifyEdge(node, RIG_Node::get(ei));

   for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
      simplifyEdge(node, RIG_Node::get(ei));

   DLLIST_DEL(node);
   stack.push(node->getValue()->id);

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "SIMPLIFY: pushed %%%i%s\n",
            node->getValue()->id,
            (node->degree < node->degreeLimit) ? "" : "(spill)");
}
   1307 
// Simplify phase of graph colouring: repeatedly remove trivially
// colourable nodes (lo lists, small values first), and when only
// constrained nodes remain, remove the cheapest spill candidate from hi.
// Fails only if every remaining candidate has infinite weight (noSpill).
void
GCRA::simplify()
{
   for (;;) {
      if (!DLLIST_EMPTY(&lo[0])) {
         do {
            simplifyNode(lo[0].next);
         } while (!DLLIST_EMPTY(&lo[0]));
      } else
      if (!DLLIST_EMPTY(&lo[1])) {
         simplifyNode(lo[1].next);
      } else
      if (!DLLIST_EMPTY(&hi)) {
         RIG_Node *best = hi.next;
         float bestScore = best->weight / (float)best->degree;
         // spill candidate: lowest weight-per-degree score
         for (RIG_Node *it = best->next; it != &hi; it = it->next) {
            float score = it->weight / (float)it->degree;
            if (score < bestScore) {
               best = it;
               bestScore = score;
            }
         }
         // NOTE(review): bare isinf relies on a global-namespace overload
         // from math.h; std::isinf would be the portable C++ spelling —
         // confirm against all supported toolchains.
         if (isinf(bestScore)) {
            ERROR("no viable spill candidates left\n");
            return false;
         }
         simplifyNode(best);
      } else {
         return true;
      }
   }
}
   1341 
// Mark the register units occupied by an already-coloured neighbour of
// `node` as unavailable. For compound values only the units selected by
// the component masks are blocked; otherwise the neighbour's full
// allocation is blocked.
void
GCRA::checkInterference(const RIG_Node *node, Graph::EdgeIterator& ei)
{
   const RIG_Node *intf = RIG_Node::get(ei);

   // Neighbours without an assigned register yet can be ignored.
   if (intf->reg < 0)
      return;
   const LValue *vA = node->getValue();
   const LValue *vB = intf->getValue();

   // Unit mask of intf within its aligned 8-unit group.
   const uint8_t intfMask = ((1 << intf->colors) - 1) << (intf->reg & 7);

   if (vA->compound | vB->compound) {
      // NOTE: this only works for >aligned< register tuples !
      // Check each pair of defs: only component ranges that are live at
      // the same time actually conflict.
      for (Value::DefCIterator D = vA->defs.begin(); D != vA->defs.end(); ++D) {
      for (Value::DefCIterator d = vB->defs.begin(); d != vB->defs.end(); ++d) {
         const LValue *vD = (*D)->get()->asLValue();
         const LValue *vd = (*d)->get()->asLValue();

         if (!vD->livei.overlaps(vd->livei)) {
            INFO_DBG(prog->dbgFlags, REG_ALLOC, "(%%%i) X (%%%i): no overlap\n",
                     vD->id, vd->id);
            continue;
         }

         uint8_t mask = vD->compound ? vD->compMask : ~0;
         if (vd->compound) {
            assert(vB->compound);
            mask &= vd->compMask & vB->compMask;
         } else {
            mask &= intfMask;
         }

         INFO_DBG(prog->dbgFlags, REG_ALLOC,
                  "(%%%i)%02x X (%%%i)%02x & %02x: $r%i.%02x\n",
                  vD->id,
                  vD->compound ? vD->compMask : 0xff,
                  vd->id,
                  vd->compound ? vd->compMask : intfMask,
                  vB->compMask, intf->reg & ~7, mask);
         if (mask)
            regs.occupyMask(node->f, intf->reg & ~7, mask);
      }
      }
   } else {
      // Simple case: block intf's whole unit range.
      INFO_DBG(prog->dbgFlags, REG_ALLOC,
               "(%%%i) X (%%%i): $r%i + %u\n",
               vA->id, vB->id, intf->reg, intf->colors);
      regs.occupy(node->f, intf->reg, intf->colors);
   }
}
   1393 
// Select phase of graph colouring: pop nodes from the simplify stack,
// block the registers of already-coloured neighbours, then try preferred
// registers before falling back to a fresh assignment. Nodes that cannot
// be coloured are queued for spilling; returns false if any spills are
// required.
bool
GCRA::selectRegisters()
{
   INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nSELECT phase\n");

   while (!stack.empty()) {
      RIG_Node *node = &nodes[stack.top()];
      stack.pop();

      regs.reset(node->f);

      INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nNODE[%%%i, %u colors]\n",
               node->getValue()->id, node->colors);

      // Mark everything taken by coloured neighbours as occupied.
      for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
         checkInterference(node, ei);
      for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
         checkInterference(node, ei);

      // Honour register preferences (see addRegPreference) when possible.
      if (!node->prefRegs.empty()) {
         for (std::list<RIG_Node *>::const_iterator it = node->prefRegs.begin();
              it != node->prefRegs.end();
              ++it) {
            if ((*it)->reg >= 0 &&
                regs.testOccupy(node->f, (*it)->reg, node->colors)) {
               node->reg = (*it)->reg;
               break;
            }
         }
      }
      if (node->reg >= 0)
         continue;
      LValue *lval = node->getValue();
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         regs.print(node->f);
      bool ret = regs.assign(node->reg, node->f, node->colors);
      if (ret) {
         INFO_DBG(prog->dbgFlags, REG_ALLOC, "assigned reg %i\n", node->reg);
         lval->compMask = node->getCompMask();
      } else {
         INFO_DBG(prog->dbgFlags, REG_ALLOC, "must spill: %%%i (size %u)\n",
                  lval->id, lval->reg.size);
         // Only GPR spills get a memory slot here; other files are
         // handled by SpillCodeInserter with a NULL slot.
         Symbol *slot = NULL;
         if (lval->reg.file == FILE_GPR)
            slot = spill.assignSlot(node->livei, lval->reg.size);
         mustSpill.push_back(ValuePair(lval, slot));
      }
   }
   if (!mustSpill.empty())
      return false;
   // Translate allocation units back into register ids on the values.
   for (unsigned int i = 0; i < nodeCount; ++i) {
      LValue *lval = nodes[i].getValue();
      if (nodes[i].reg >= 0 && nodes[i].colors > 0)
         lval->reg.data.id =
            regs.unitsToId(nodes[i].f, nodes[i].reg, lval->reg.size);
   }
   return true;
}
   1452 
// Top-level driver for one register allocation attempt: build RIG nodes,
// coalesce, construct the interference graph, simplify, and select.
// Returns false on failure; when colouring fails, spill code is inserted
// and the caller is expected to retry.
bool
GCRA::allocateRegisters(ArrayList& insns)
{
   bool ret;

   INFO_DBG(prog->dbgFlags, REG_ALLOC,
            "allocateRegisters to %u instructions\n", insns.getSize());

   nodeCount = func->allLValues.getSize();
   nodes = new RIG_Node[nodeCount];
   // NOTE(review): plain new[] throws on failure rather than returning
   // NULL, so this check only matters with -fno-exceptions builds —
   // confirm the build flags before removing it.
   if (!nodes)
      return false;
   for (unsigned int i = 0; i < nodeCount; ++i) {
      LValue *lval = reinterpret_cast<LValue *>(func->allLValues.get(i));
      if (lval) {
         nodes[i].init(regs, lval);
         RIG.insert(&nodes[i]);

         // On pre-Fermi, prefer to place a MAD/SAD destination in the same
         // register as src2, which enables the short encoding.
         if (lval->inFile(FILE_GPR) && lval->getInsn() != NULL &&
             prog->getTarget()->getChipset() < 0xc0) {
            Instruction *insn = lval->getInsn();
            if (insn->op == OP_MAD || insn->op == OP_SAD)
               // Short encoding only possible if they're all GPRs, no need to
               // affect them otherwise.
               if (insn->flagsDef < 0 &&
                   insn->src(0).getFile() == FILE_GPR &&
                   insn->src(1).getFile() == FILE_GPR &&
                   insn->src(2).getFile() == FILE_GPR)
                  nodes[i].addRegPreference(getNode(insn->getSrc(2)->asLValue()));
         }
      }
   }

   // coalesce first, we use only 1 RIG node for a group of joined values
   ret = coalesce(insns);
   if (!ret)
      goto out;

   if (func->getProgram()->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
      func->printLiveIntervals();

   buildRIG(insns);
   calculateSpillWeights();
   ret = simplify();
   if (!ret)
      goto out;

   ret = selectRegisters();
   if (!ret) {
      // Colouring failed: insert spill code so the next attempt has fewer
      // simultaneously-live values.
      INFO_DBG(prog->dbgFlags, REG_ALLOC,
               "selectRegisters failed, inserting spill code ...\n");
      regs.reset(FILE_GPR, true);
      spill.run(mustSpill);
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         func->print();
   } else {
      prog->maxGPR = std::max(prog->maxGPR, regs.getMaxAssigned(FILE_GPR));
   }

out:
   cleanup(ret);
   return ret;
}
   1516 
// Reset per-attempt state after a register allocation pass: clear live
// intervals and compound flags, propagate assigned ids (on success) or
// undo the joins (on failure), resolve recorded splits/merges, and free
// the RIG node array and worklists.
void
GCRA::cleanup(const bool success)
{
   mustSpill.clear();

   for (ArrayList::Iterator it = func->allLValues.iterator();
        !it.end(); it.next()) {
      LValue *lval =  reinterpret_cast<LValue *>(it.get());

      lval->livei.clear();

      lval->compound = 0;
      lval->compMask = 0;

      // Join leaders represent themselves; nothing more to do for them.
      if (lval->join == lval)
         continue;

      if (success) {
         // Joined values inherit the leader's register assignment.
         lval->reg.data.id = lval->join->reg.data.id;
      } else {
         // Undo coalescing so the next attempt starts from a clean slate.
         for (Value::DefIterator d = lval->defs.begin(); d != lval->defs.end();
              ++d)
            lval->join->defs.remove(*d);
         lval->join = lval;
      }
   }

   if (success)
      resolveSplitsAndMerges();
   splits.clear(); // avoid duplicate entries on next coalesce pass
   merges.clear();

   delete[] nodes;
   nodes = NULL;
   hi.next = hi.prev = &hi;
   lo[0].next = lo[0].prev = &lo[0];
   lo[1].next = lo[1].prev = &lo[1];
}
   1555 
// Find or create a local-memory spill slot of the given size whose
// occupancy does not overlap the live interval; tries to reuse existing
// slots (interval packing) before growing the stack.
Symbol *
SpillCodeInserter::assignSlot(const Interval &livei, const unsigned int size)
{
   SpillSlot slot;
   int32_t offsetBase = stackSize;
   int32_t offset;
   std::list<SpillSlot>::iterator pos = slots.end(), it = slots.begin();

   // Align the search start to the slot size.
   if (offsetBase % size)
      offsetBase += size - (offsetBase % size);

   slot.sym = NULL;

   // Scan existing slots for a size-aligned window free during livei.
   for (offset = offsetBase; offset < stackSize; offset += size) {
      const int32_t entryEnd = offset + size;
      while (it != slots.end() && it->offset < offset)
         ++it;
      if (it == slots.end()) // no slots left
         break;
      std::list<SpillSlot>::iterator bgn = it;

      while (it != slots.end() && it->offset < entryEnd) {
         // NOTE(review): this print is unconditional (not gated on debug
         // flags) — looks like leftover debugging output; confirm whether
         // it should be removed or gated.
         it->occup.print();
         if (it->occup.overlaps(livei))
            break;
         ++it;
      }
      if (it == slots.end() || it->offset >= entryEnd) {
         // fits
         for (; bgn != slots.end() && bgn->offset < entryEnd; ++bgn) {
            bgn->occup.insert(livei);
            if (bgn->size() == size)
               slot.sym = bgn->sym;
         }
         break;
      }
   }
   // No reusable slot: allocate a fresh one at the end of the stack.
   if (!slot.sym) {
      stackSize = offset + size;
      slot.offset = offset;
      slot.sym = new_Symbol(func->getProgram(), FILE_MEMORY_LOCAL);
      if (!func->stackPtr)
         offset += func->tlsBase;
      slot.sym->setAddress(NULL, offset);
      slot.sym->reg.size = size;
      slots.insert(pos, slot)->occup.insert(livei);
   }
   return slot.sym;
}
   1605 
   1606 Value *
   1607 SpillCodeInserter::offsetSlot(Value *base, const LValue *lval)
   1608 {
   1609    if (!lval->compound || (lval->compMask & 0x1))
   1610       return base;
   1611    Value *slot = cloneShallow(func, base);
   1612 
   1613    slot->reg.data.offset += (ffs(lval->compMask) - 1) * lval->reg.size;
   1614    slot->reg.size = lval->reg.size;
   1615 
   1616    return slot;
   1617 }
   1618 
// Insert a spill (store) of lval to its slot right after its definition.
// For local memory this is a STORE (B96 values are split into 32-bit
// stores first); for other files a register-to-register CVT is used.
void
SpillCodeInserter::spill(Instruction *defi, Value *slot, LValue *lval)
{
   const DataType ty = typeOfSize(lval->reg.size);

   slot = offsetSlot(slot, lval);

   Instruction *st;
   if (slot->reg.file == FILE_MEMORY_LOCAL) {
      // The spilled value itself must never be spilled again.
      lval->noSpill = 1;
      if (ty != TYPE_B96) {
         st = new_Instruction(func, OP_STORE, ty);
         st->setSrc(0, slot);
         st->setSrc(1, lval);
      } else {
         // 96-bit values cannot be stored in one go: split into 32-bit
         // parts and emit one store per part.
         st = new_Instruction(func, OP_SPLIT, ty);
         st->setSrc(0, lval);
         for (int d = 0; d < lval->reg.size / 4; ++d)
            st->setDef(d, new_LValue(func, FILE_GPR));

         for (int d = lval->reg.size / 4 - 1; d >= 0; --d) {
            Value *tmp = cloneShallow(func, slot);
            tmp->reg.size = 4;
            tmp->reg.data.offset += 4 * d;

            Instruction *s = new_Instruction(func, OP_STORE, TYPE_U32);
            s->setSrc(0, tmp);
            s->setSrc(1, st->getDef(d));
            defi->bb->insertAfter(defi, s);
         }
      }
   } else {
      // Spilling to another register file: a simple conversion move.
      st = new_Instruction(func, OP_CVT, ty);
      st->setDef(0, slot);
      st->setSrc(0, lval);
      if (lval->reg.file == FILE_FLAGS)
         st->flagsSrc = 0;
   }
   defi->bb->insertAfter(defi, st);
}
   1659 
// Insert an unspill (reload) of lval from its slot right before the use,
// returning the fresh temporary holding the value. Mirrors spill(): LOAD
// from local memory (B96 via 32-bit loads merged together), CVT otherwise.
LValue *
SpillCodeInserter::unspill(Instruction *usei, LValue *lval, Value *slot)
{
   const DataType ty = typeOfSize(lval->reg.size);

   slot = offsetSlot(slot, lval);
   // Reload into a fresh clone so the original spilled value stays in
   // memory and each use gets its own short-lived register.
   lval = cloneShallow(func, lval);

   Instruction *ld;
   if (slot->reg.file == FILE_MEMORY_LOCAL) {
      lval->noSpill = 1;
      if (ty != TYPE_B96) {
         ld = new_Instruction(func, OP_LOAD, ty);
      } else {
         // 96-bit values are reloaded as three 32-bit loads + MERGE.
         ld = new_Instruction(func, OP_MERGE, ty);
         for (int d = 0; d < lval->reg.size / 4; ++d) {
            Value *tmp = cloneShallow(func, slot);
            LValue *val;
            tmp->reg.size = 4;
            tmp->reg.data.offset += 4 * d;

            Instruction *l = new_Instruction(func, OP_LOAD, TYPE_U32);
            l->setDef(0, (val = new_LValue(func, FILE_GPR)));
            l->setSrc(0, tmp);
            usei->bb->insertBefore(usei, l);
            ld->setSrc(d, val);
            val->noSpill = 1;
         }
         ld->setDef(0, lval);
         usei->bb->insertBefore(usei, ld);
         return lval;
      }
   } else {
      // Reload from another register file: a simple conversion move.
      ld = new_Instruction(func, OP_CVT, ty);
   }
   ld->setDef(0, lval);
   ld->setSrc(0, slot);
   if (lval->reg.file == FILE_FLAGS)
      ld->flagsDef = 0;

   usei->bb->insertBefore(usei, ld);
   return lval;
}
   1703 
   1704 static bool
   1705 value_cmp(ValueRef *a, ValueRef *b) {
   1706    Instruction *ai = a->getInsn(), *bi = b->getInsn();
   1707    if (ai->bb != bi->bb)
   1708       return ai->bb->getId() < bi->bb->getId();
   1709    return ai->serial < bi->serial;
   1710 }
   1711 
   1712 // For each value that is to be spilled, go through all its definitions.
   1713 // A value can have multiple definitions if it has been coalesced before.
   1714 // For each definition, first go through all its uses and insert an unspill
   1715 // instruction before it, then replace the use with the temporary register.
   1716 // Unspill can be either a load from memory or simply a move to another
   1717 // register file.
   1718 // For "Pseudo" instructions (like PHI, SPLIT, MERGE) we can erase the use
// if we have spilled to a memory location, or simply replace it with the
// new register.
   1720 // No load or conversion instruction should be needed.
   1721 bool
   1722 SpillCodeInserter::run(const std::list<ValuePair>& lst)
   1723 {
   1724    for (std::list<ValuePair>::const_iterator it = lst.begin(); it != lst.end();
   1725         ++it) {
   1726       LValue *lval = it->first->asLValue();
   1727       Symbol *mem = it->second ? it->second->asSym() : NULL;
   1728 
   1729       // Keep track of which instructions to delete later. Deleting them
   1730       // inside the loop is unsafe since a single instruction may have
   1731       // multiple destinations that all need to be spilled (like OP_SPLIT).
   1732       unordered_set<Instruction *> to_del;
   1733 
   1734       for (Value::DefIterator d = lval->defs.begin(); d != lval->defs.end();
   1735            ++d) {
   1736          Value *slot = mem ?
   1737             static_cast<Value *>(mem) : new_LValue(func, FILE_GPR);
   1738          Value *tmp = NULL;
   1739          Instruction *last = NULL;
   1740 
   1741          LValue *dval = (*d)->get()->asLValue();
   1742          Instruction *defi = (*d)->getInsn();
   1743 
   1744          // Sort all the uses by BB/instruction so that we don't unspill
   1745          // multiple times in a row, and also remove a source of
   1746          // non-determinism.
   1747          std::vector<ValueRef *> refs(dval->uses.begin(), dval->uses.end());
   1748          std::sort(refs.begin(), refs.end(), value_cmp);
   1749 
   1750          // Unspill at each use *before* inserting spill instructions,
   1751          // we don't want to have the spill instructions in the use list here.
   1752          for (std::vector<ValueRef*>::const_iterator it = refs.begin();
   1753               it != refs.end(); ++it) {
   1754             ValueRef *u = *it;
   1755             Instruction *usei = u->getInsn();
   1756             assert(usei);
   1757             if (usei->isPseudo()) {
   1758                tmp = (slot->reg.file == FILE_MEMORY_LOCAL) ? NULL : slot;
   1759                last = NULL;
   1760             } else {
   1761                if (!last || (usei != last->next && usei != last))
   1762                   tmp = unspill(usei, dval, slot);
   1763                last = usei;
   1764             }
   1765             u->set(tmp);
   1766          }
   1767 
   1768          assert(defi);
   1769          if (defi->isPseudo()) {
   1770             d = lval->defs.erase(d);
   1771             --d;
   1772             if (slot->reg.file == FILE_MEMORY_LOCAL)
   1773                to_del.insert(defi);
   1774             else
   1775                defi->setDef(0, slot);
   1776          } else {
   1777             spill(defi, slot, dval);
   1778          }
   1779       }
   1780 
   1781       for (unordered_set<Instruction *>::const_iterator it = to_del.begin();
   1782            it != to_del.end(); ++it)
   1783          delete_Instruction(func->getProgram(), *it);
   1784    }
   1785 
   1786    // TODO: We're not trying to reuse old slots in a potential next iteration.
   1787    //  We have to update the slots' livei intervals to be able to do that.
   1788    stackBase = stackSize;
   1789    slots.clear();
   1790    return true;
   1791 }
   1792 
   1793 bool
   1794 RegAlloc::exec()
   1795 {
   1796    for (IteratorRef it = prog->calls.iteratorDFS(false);
   1797         !it->end(); it->next()) {
   1798       func = Function::get(reinterpret_cast<Graph::Node *>(it->get()));
   1799 
   1800       func->tlsBase = prog->tlsSize;
   1801       if (!execFunc())
   1802          return false;
   1803       prog->tlsSize += func->tlsSize;
   1804    }
   1805    return true;
   1806 }
   1807 
// Run all register-allocation stages on the current function: insert
// constraints and phi/argument moves, then repeatedly build live sets and
// intervals and run the graph-coloring allocator, retrying when coloring
// fails (presumably after spill code was inserted — see SpillCodeInserter).
bool
RegAlloc::execFunc()
{
   InsertConstraintsPass insertConstr;
   PhiMovesPass insertPhiMoves;
   ArgumentMovesPass insertArgMoves;
   BuildIntervalsPass buildIntervals;
   SpillCodeInserter insertSpills(func);

   GCRA gcra(func, insertSpills);

   unsigned int i, retries;
   bool ret;

   if (!func->ins.empty()) {
      // Insert a nop at the entry so inputs only used by the first instruction
      // don't count as having an empty live range.
      Instruction *nop = new_Instruction(func, OP_NOP, TYPE_NONE);
      BasicBlock::get(func->cfg.getRoot())->insertHead(nop);
   }

   ret = insertConstr.exec(func);
   if (!ret)
      goto out;

   ret = insertPhiMoves.run(func);
   if (!ret)
      goto out;

   ret = insertArgMoves.run(func);
   if (!ret)
      goto out;

   // TODO: need to fix up spill slot usage ranges to support > 1 retry
   for (retries = 0; retries < 3; ++retries) {
      if (retries && (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC))
         INFO("Retry: %i\n", retries);
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         func->print();

      // spilling to registers may add live ranges, need to rebuild everything
      ret = true;
      // one liveness pass per loop-nesting level so information propagates
      // across back edges
      for (sequence = func->cfg.nextSequence(), i = 0;
           ret && i <= func->loopNestingBound;
           sequence = func->cfg.nextSequence(), ++i)
         ret = buildLiveSets(BasicBlock::get(func->cfg.getRoot()));
      // reset marker
      for (ArrayList::Iterator bi = func->allBBlocks.iterator();
           !bi.end(); bi.next())
         BasicBlock::get(bi)->liveSet.marker = false;
      if (!ret)
         break;
      func->orderInstructions(this->insns);

      ret = buildIntervals.run(func);
      if (!ret)
         break;
      ret = gcra.allocateRegisters(insns);
      if (ret)
         break; // success
   }
   INFO_DBG(prog->dbgFlags, REG_ALLOC, "RegAlloc done: %i\n", ret);

   func->tlsSize = insertSpills.getStackSize();
out:
   return ret;
}
   1875 
   1876 // TODO: check if modifying Instruction::join here breaks anything
   1877 void
   1878 GCRA::resolveSplitsAndMerges()
   1879 {
   1880    for (std::list<Instruction *>::iterator it = splits.begin();
   1881         it != splits.end();
   1882         ++it) {
   1883       Instruction *split = *it;
   1884       unsigned int reg = regs.idToBytes(split->getSrc(0));
   1885       for (int d = 0; split->defExists(d); ++d) {
   1886          Value *v = split->getDef(d);
   1887          v->reg.data.id = regs.bytesToId(v, reg);
   1888          v->join = v;
   1889          reg += v->reg.size;
   1890       }
   1891    }
   1892    splits.clear();
   1893 
   1894    for (std::list<Instruction *>::iterator it = merges.begin();
   1895         it != merges.end();
   1896         ++it) {
   1897       Instruction *merge = *it;
   1898       unsigned int reg = regs.idToBytes(merge->getDef(0));
   1899       for (int s = 0; merge->srcExists(s); ++s) {
   1900          Value *v = merge->getSrc(s);
   1901          v->reg.data.id = regs.bytesToId(v, reg);
   1902          v->join = v;
   1903          // If the value is defined by a phi/union node, we also need to
   1904          // perform the same fixup on that node's sources, since after RA
   1905          // their registers should be identical.
   1906          if (v->getInsn()->op == OP_PHI || v->getInsn()->op == OP_UNION) {
   1907             Instruction *phi = v->getInsn();
   1908             for (int phis = 0; phi->srcExists(phis); ++phis) {
   1909                phi->getSrc(phis)->join = v;
   1910                phi->getSrc(phis)->reg.data.id = v->reg.data.id;
   1911             }
   1912          }
   1913          reg += v->reg.size;
   1914       }
   1915    }
   1916    merges.clear();
   1917 }
   1918 
   1919 bool Program::registerAllocation()
   1920 {
   1921    RegAlloc ra(this);
   1922    return ra.exec();
   1923 }
   1924 
   1925 bool
   1926 RegAlloc::InsertConstraintsPass::exec(Function *ir)
   1927 {
   1928    constrList.clear();
   1929 
   1930    bool ret = run(ir, true, true);
   1931    if (ret)
   1932       ret = insertConstraintMoves();
   1933    return ret;
   1934 }
   1935 
   1936 // TODO: make part of texture insn
   1937 void
   1938 RegAlloc::InsertConstraintsPass::textureMask(TexInstruction *tex)
   1939 {
   1940    Value *def[4];
   1941    int c, k, d;
   1942    uint8_t mask = 0;
   1943 
   1944    for (d = 0, k = 0, c = 0; c < 4; ++c) {
   1945       if (!(tex->tex.mask & (1 << c)))
   1946          continue;
   1947       if (tex->getDef(k)->refCount()) {
   1948          mask |= 1 << c;
   1949          def[d++] = tex->getDef(k);
   1950       }
   1951       ++k;
   1952    }
   1953    tex->tex.mask = mask;
   1954 
   1955    for (c = 0; c < d; ++c)
   1956       tex->setDef(c, def[c]);
   1957    for (; c < 4; ++c)
   1958       tex->setDef(c, NULL);
   1959 }
   1960 
   1961 bool
   1962 RegAlloc::InsertConstraintsPass::detectConflict(Instruction *cst, int s)
   1963 {
   1964    Value *v = cst->getSrc(s);
   1965 
   1966    // current register allocation can't handle it if a value participates in
   1967    // multiple constraints
   1968    for (Value::UseIterator it = v->uses.begin(); it != v->uses.end(); ++it) {
   1969       if (cst != (*it)->getInsn())
   1970          return true;
   1971    }
   1972 
   1973    // can start at s + 1 because detectConflict is called on all sources
   1974    for (int c = s + 1; cst->srcExists(c); ++c)
   1975       if (v == cst->getSrc(c))
   1976          return true;
   1977 
   1978    Instruction *defi = v->getInsn();
   1979 
   1980    return (!defi || defi->constrainedDefs());
   1981 }
   1982 
// Force sources [s, s+n) of instruction i into consecutive registers by
// routing them through the defs of an OP_CONSTRAINT. An existing identical
// constraint op is reused when it dominates i.
void
RegAlloc::InsertConstraintsPass::addConstraint(Instruction *i, int s, int n)
{
   Instruction *cst;
   int d;

   // first, look for an existing identical constraint op
   for (std::list<Instruction *>::iterator it = constrList.begin();
        it != constrList.end();
        ++it) {
      cst = (*it);
      if (!i->bb->dominatedBy(cst->bb))
         break;
      // compare all n sources; d == n afterwards means they all matched
      for (d = 0; d < n; ++d)
         if (cst->getSrc(d) != i->getSrc(d + s))
            break;
      if (d >= n) {
         // reuse: replace i's sources with the existing constraint's defs
         for (d = 0; d < n; ++d, ++s)
            i->setSrc(s, cst->getDef(d));
         return;
      }
   }
   // no reusable constraint found: create a fresh one right before i
   cst = new_Instruction(func, OP_CONSTRAINT, i->dType);

   for (d = 0; d < n; ++s, ++d) {
      cst->setDef(d, new_LValue(func, FILE_GPR));
      cst->setSrc(d, i->getSrc(s));
      i->setSrc(s, cst->getDef(d));
   }
   i->bb->insertBefore(i, cst);

   constrList.push_back(cst);
}
   2016 
   2017 // Add a dummy use of the pointer source of >= 8 byte loads after the load
   2018 // to prevent it from being assigned a register which overlapping the load's
   2019 // destination, which would produce random corruptions.
   2020 void
   2021 RegAlloc::InsertConstraintsPass::addHazard(Instruction *i, const ValueRef *src)
   2022 {
   2023    Instruction *hzd = new_Instruction(func, OP_NOP, TYPE_NONE);
   2024    hzd->setSrc(0, src->get());
   2025    i->bb->insertAfter(i, hzd);
   2026 
   2027 }
   2028 
// b32 { %r0 %r1 %r2 %r3 } -> b128 %r0q
// Replace the leading GPR defs of insn by a single wide value and append an
// OP_SPLIT that breaks the wide value back into the original defs.
void
RegAlloc::InsertConstraintsPass::condenseDefs(Instruction *insn)
{
   uint8_t size = 0;
   int n;
   // count the leading GPR defs and accumulate their total size
   for (n = 0; insn->defExists(n) && insn->def(n).getFile() == FILE_GPR; ++n)
      size += insn->getDef(n)->reg.size;
   if (n < 2)
      return;
   LValue *lval = new_LValue(func, FILE_GPR);
   lval->reg.size = size;

   // the split consumes the wide value and re-creates the original defs
   Instruction *split = new_Instruction(func, OP_SPLIT, typeOfSize(size));
   split->setSrc(0, lval);
   for (int d = 0; d < n; ++d) {
      split->setDef(d, insn->getDef(d));
      insn->setDef(d, NULL);
   }
   insn->setDef(0, lval);

   // shift any remaining (non-GPR) defs down to follow the wide def
   for (int k = 1, d = n; insn->defExists(d); ++d, ++k) {
      insn->setDef(k, insn->getDef(d));
      insn->setDef(d, NULL);
   }
   // carry over predicate if any (mainly for OP_UNION uses)
   split->setPredicate(insn->cc, insn->getPredicate());

   insn->bb->insertAfter(insn, split);
   constrList.push_back(split);
}
// Replace sources [a, b] of insn by a single wide value, produced by an
// OP_MERGE of the original sources that is inserted right before insn.
void
RegAlloc::InsertConstraintsPass::condenseSrcs(Instruction *insn,
                                              const int a, const int b)
{
   uint8_t size = 0;
   if (a >= b)
      return;
   for (int s = a; s <= b; ++s)
      size += insn->getSrc(s)->reg.size;
   if (!size)
      return;
   LValue *lval = new_LValue(func, FILE_GPR);
   lval->reg.size = size;

   // extra sources (e.g. predicate) must not be disturbed by moveSources
   Value *save[3];
   insn->takeExtraSources(0, save);

   Instruction *merge = new_Instruction(func, OP_MERGE, typeOfSize(size));
   merge->setDef(0, lval);
   for (int s = a, i = 0; s <= b; ++s, ++i) {
      merge->setSrc(i, insn->getSrc(s));
   }
   // shift sources after b down by (b - a) slots to close the gap
   insn->moveSources(b + 1, a - b);
   insn->setSrc(a, lval);
   insn->bb->insertBefore(insn, merge);

   insn->putExtraSources(0, save);

   constrList.push_back(merge);
}
   2090 
   2091 void
   2092 RegAlloc::InsertConstraintsPass::texConstraintGM107(TexInstruction *tex)
   2093 {
   2094    int n, s;
   2095 
   2096    if (isTextureOp(tex->op))
   2097       textureMask(tex);
   2098    condenseDefs(tex);
   2099 
   2100    if (isSurfaceOp(tex->op)) {
   2101       int s = tex->tex.target.getDim() +
   2102          (tex->tex.target.isArray() || tex->tex.target.isCube());
   2103       int n = 0;
   2104 
   2105       switch (tex->op) {
   2106       case OP_SUSTB:
   2107       case OP_SUSTP:
   2108          n = 4;
   2109          break;
   2110       case OP_SUREDB:
   2111       case OP_SUREDP:
   2112          if (tex->subOp == NV50_IR_SUBOP_ATOM_CAS)
   2113             n = 2;
   2114          break;
   2115       default:
   2116          break;
   2117       }
   2118 
   2119       if (s > 1)
   2120          condenseSrcs(tex, 0, s - 1);
   2121       if (n > 1)
   2122          condenseSrcs(tex, 1, n); // do not condense the tex handle
   2123    } else
   2124    if (isTextureOp(tex->op)) {
   2125       if (tex->op != OP_TXQ) {
   2126          s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
   2127          if (tex->op == OP_TXD) {
   2128             // Indirect handle belongs in the first arg
   2129             if (tex->tex.rIndirectSrc >= 0)
   2130                s++;
   2131             if (!tex->tex.target.isArray() && tex->tex.useOffsets)
   2132                s++;
   2133          }
   2134          n = tex->srcCount(0xff) - s;
   2135       } else {
   2136          s = tex->srcCount(0xff);
   2137          n = 0;
   2138       }
   2139 
   2140       if (s > 1)
   2141          condenseSrcs(tex, 0, s - 1);
   2142       if (n > 1) // NOTE: first call modified positions already
   2143          condenseSrcs(tex, 1, n);
   2144    }
   2145 }
   2146 
   2147 void
   2148 RegAlloc::InsertConstraintsPass::texConstraintNVE0(TexInstruction *tex)
   2149 {
   2150    if (isTextureOp(tex->op))
   2151       textureMask(tex);
   2152    condenseDefs(tex);
   2153 
   2154    if (tex->op == OP_SUSTB || tex->op == OP_SUSTP) {
   2155       condenseSrcs(tex, 3, 6);
   2156    } else
   2157    if (isTextureOp(tex->op)) {
   2158       int n = tex->srcCount(0xff, true);
   2159       if (n > 4) {
   2160          condenseSrcs(tex, 0, 3);
   2161          if (n > 5) // NOTE: first call modified positions already
   2162             condenseSrcs(tex, 4 - (4 - 1), n - 1 - (4 - 1));
   2163       } else
   2164       if (n > 1) {
   2165          condenseSrcs(tex, 0, n - 1);
   2166       }
   2167    }
   2168 }
   2169 
   2170 void
   2171 RegAlloc::InsertConstraintsPass::texConstraintNVC0(TexInstruction *tex)
   2172 {
   2173    int n, s;
   2174 
   2175    if (isTextureOp(tex->op))
   2176       textureMask(tex);
   2177 
   2178    if (tex->op == OP_TXQ) {
   2179       s = tex->srcCount(0xff);
   2180       n = 0;
   2181    } else if (isSurfaceOp(tex->op)) {
   2182       s = tex->tex.target.getDim() + (tex->tex.target.isArray() || tex->tex.target.isCube());
   2183       if (tex->op == OP_SUSTB || tex->op == OP_SUSTP)
   2184          n = 4;
   2185       else
   2186          n = 0;
   2187    } else {
   2188       s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
   2189       if (!tex->tex.target.isArray() &&
   2190           (tex->tex.rIndirectSrc >= 0 || tex->tex.sIndirectSrc >= 0))
   2191          ++s;
   2192       if (tex->op == OP_TXD && tex->tex.useOffsets)
   2193          ++s;
   2194       n = tex->srcCount(0xff) - s;
   2195       assert(n <= 4);
   2196    }
   2197 
   2198    if (s > 1)
   2199       condenseSrcs(tex, 0, s - 1);
   2200    if (n > 1) // NOTE: first call modified positions already
   2201       condenseSrcs(tex, 1, n);
   2202 
   2203    condenseDefs(tex);
   2204 }
   2205 
   2206 void
   2207 RegAlloc::InsertConstraintsPass::texConstraintNV50(TexInstruction *tex)
   2208 {
   2209    Value *pred = tex->getPredicate();
   2210    if (pred)
   2211       tex->setPredicate(tex->cc, NULL);
   2212 
   2213    textureMask(tex);
   2214 
   2215    assert(tex->defExists(0) && tex->srcExists(0));
   2216    // make src and def count match
   2217    int c;
   2218    for (c = 0; tex->srcExists(c) || tex->defExists(c); ++c) {
   2219       if (!tex->srcExists(c))
   2220          tex->setSrc(c, new_LValue(func, tex->getSrc(0)->asLValue()));
   2221       if (!tex->defExists(c))
   2222          tex->setDef(c, new_LValue(func, tex->getDef(0)->asLValue()));
   2223    }
   2224    if (pred)
   2225       tex->setPredicate(tex->cc, pred);
   2226    condenseDefs(tex);
   2227    condenseSrcs(tex, 0, c - 1);
   2228 }
   2229 
// Insert constraint markers for instructions whose multiple sources must be
// located in consecutive registers.
bool
RegAlloc::InsertConstraintsPass::visit(BasicBlock *bb)
{
   TexInstruction *tex;
   Instruction *next;
   int s, size;

   targ = bb->getProgram()->getTarget();

   // iterate via a saved 'next' pointer since the helpers below insert
   // instructions around 'i' while we walk the block
   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if ((tex = i->asTex())) {
         // dispatch on chipset generation; the low nibble is the variant
         switch (targ->getChipset() & ~0xf) {
         case 0x50:
         case 0x80:
         case 0x90:
         case 0xa0:
            texConstraintNV50(tex);
            break;
         case 0xc0:
         case 0xd0:
            texConstraintNVC0(tex);
            break;
         case 0xe0:
         case 0xf0:
         case 0x100:
            texConstraintNVE0(tex);
            break;
         case 0x110:
         case 0x120:
         case 0x130:
            texConstraintGM107(tex);
            break;
         default:
            break;
         }
      } else
      if (i->op == OP_EXPORT || i->op == OP_STORE) {
         // the stored value starts at source 1; count how many sources it
         // spans by walking until their sizes cover the data type
         for (size = typeSizeof(i->dType), s = 1; size > 0; ++s) {
            assert(i->srcExists(s));
            size -= i->getSrc(s)->reg.size;
         }
         condenseSrcs(i, 1, s - 1);
      } else
      if (i->op == OP_LOAD || i->op == OP_VFETCH) {
         condenseDefs(i);
         // wide loads: keep address registers live past the load so they
         // don't get allocated on top of the destination (see addHazard)
         if (i->src(0).isIndirect(0) && typeSizeof(i->dType) >= 8)
            addHazard(i, i->src(0).getIndirect(0));
         if (i->src(0).isIndirect(1) && typeSizeof(i->dType) >= 8)
            addHazard(i, i->src(0).getIndirect(1));
      } else
      if (i->op == OP_UNION ||
          i->op == OP_MERGE ||
          i->op == OP_SPLIT) {
         constrList.push_back(i);
      }
   }
   return true;
}
   2292 
// Insert extra moves so that, if multiple register constraints on a value are
// in conflict, these conflicts can be resolved.
bool
RegAlloc::InsertConstraintsPass::insertConstraintMoves()
{
   for (std::list<Instruction *>::iterator it = constrList.begin();
        it != constrList.end();
        ++it) {
      Instruction *cst = *it;
      Instruction *mov;

      // NOTE: the "&& 0" intentionally disables this branch; splits are
      // currently handled without extra moves.
      if (cst->op == OP_SPLIT && 0) {
         // spilling splits is annoying, just make sure they're separate
         for (int d = 0; cst->defExists(d); ++d) {
            if (!cst->getDef(d)->refCount())
               continue;
            LValue *lval = new_LValue(func, cst->def(d).getFile());
            const uint8_t size = cst->def(d).getSize();
            lval->reg.size = size;

            mov = new_Instruction(func, OP_MOV, typeOfSize(size));
            mov->setSrc(0, lval);
            mov->setDef(0, cst->getDef(d));
            cst->setDef(d, mov->getSrc(0));
            cst->bb->insertAfter(cst, mov);

            cst->getSrc(0)->asLValue()->noSpill = 1;
            mov->getSrc(0)->asLValue()->noSpill = 1;
         }
      } else
      if (cst->op == OP_MERGE || cst->op == OP_UNION) {
         for (int s = 0; cst->srcExists(s); ++s) {
            const uint8_t size = cst->src(s).getSize();

            // source without a def: give it one via a NOP so the allocator
            // has something to assign
            if (!cst->getSrc(s)->defs.size()) {
               mov = new_Instruction(func, OP_NOP, typeOfSize(size));
               mov->setDef(0, cst->getSrc(s));
               cst->bb->insertBefore(cst, mov);
               continue;
            }
            assert(cst->getSrc(s)->defs.size() == 1); // still SSA

            Instruction *defi = cst->getSrc(s)->defs.front()->getInsn();
            // catch some cases where don't really need MOVs
            if (cst->getSrc(s)->refCount() == 1 && !defi->constrainedDefs())
               continue;

            // copy the source into a fresh value so only the copy carries
            // the register constraint
            LValue *lval = new_LValue(func, cst->src(s).getFile());
            lval->reg.size = size;

            mov = new_Instruction(func, OP_MOV, typeOfSize(size));
            mov->setDef(0, lval);
            mov->setSrc(0, cst->getSrc(s));
            cst->setSrc(s, mov->getDef(0));
            cst->bb->insertBefore(cst, mov);

            cst->getDef(0)->asLValue()->noSpill = 1; // doesn't help

            // predicate the copy like the defining instruction so a union
            // source is only written when its def would have been
            if (cst->op == OP_UNION)
               mov->setPredicate(defi->cc, defi->getPredicate());
         }
      }
   }

   return true;
}
   2359 
   2360 } // namespace nv50_ir
   2361