HomeSort by relevance Sort by last modified time
    Searched refs:PHI (Results 1 - 25 of 105) sorted by null

1 2 3 4 5

  /external/llvm/lib/CodeGen/
MachineSSAUpdater.cpp 51 /// updates. ProtoValue is the value used to name PHI nodes.
110 /// InsertNewDef - Insert an empty PHI or IMPLICIT_DEF instruction which define
139 /// a block. Because of this, we need to insert a new PHI node in SomeBB to
181 // If an identical PHI is already in BB, just reuse it.
186 // Otherwise, we do need a PHI: insert one now.
188 MachineInstrBuilder InsertedPHI = InsertNewDef(TargetOpcode::PHI, BB,
191 // Fill in all the predecessors of the PHI.
195 // See if the PHI node can be merged to a single value. This can happen in
196 // loop cases when we get a PHI of itself and one other value.
205 DEBUG(dbgs() << " Inserted PHI: " << *InsertedPHI << "\n")
    [all...]
EarlyIfConversion.cpp 109 /// Information about each phi in the Tail block.
111 MachineInstr *PHI;
116 PHIInfo(MachineInstr *phi)
117 : PHI(phi), TReg(0), FReg(0), CondCycles(0), TCycles(0), FCycles(0) {}
147 /// Replace PHI instructions in Tail with selects.
150 /// Insert selects and rewrite PHI operands to use them.
412 // Find PHI operands corresponding to TPred and FPred.
413 for (unsigned i = 1; i != PI.PHI->getNumOperands(); i += 2) {
414 if (PI.PHI->getOperand(i+1).getMBB() == TPred
    [all...]
MachineTraceMetrics.cpp 651 // Get the input data dependencies of a PHI instruction, using Pred as the
661 assert(UseMI->isPHI() && UseMI->getNumOperands() % 2 && "Bad PHI");
    [all...]
  /external/llvm/include/llvm/Target/
TargetOpcodes.h 26 PHI = 0,
  /prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/lib/gcc/x86_64-w64-mingw32/4.8.3/plugin/include/
tree-ssa-operands.h 75 #define PHI_RESULT_PTR(PHI) gimple_phi_result_ptr (PHI)
76 #define PHI_RESULT(PHI) DEF_FROM_PTR (PHI_RESULT_PTR (PHI))
77 #define SET_PHI_RESULT(PHI, V) SET_DEF (PHI_RESULT_PTR (PHI), (V))
79 #define PHI_ARG_DEF_PTR(PHI, I) gimple_phi_arg_imm_use_ptr ((PHI), (I))
80 #define PHI_ARG_DEF(PHI, I) USE_FROM_PTR (PHI_ARG_DEF_PTR ((PHI), (I)))
    [all...]
  /external/llvm/lib/Transforms/Utils/
SSAUpdater.cpp 65 static bool IsEquivalentPHI(PHINode *PHI,
67 unsigned PHINumValues = PHI->getNumIncomingValues();
71 // Scan the phi to see if it matches.
73 if (ValueMapping[PHI->getIncomingBlock(i)] !=
74 PHI->getIncomingValue(i)) {
98 // is relatively slow. If we already have PHI nodes in this block, walk one
136 // Otherwise, we do need a PHI: check to see if we already have one available
153 // Fill in all the predecessors of the PHI.
157 // See if the PHI node can be merged to a single value. This can happen in
158 // loop cases when we get a PHI of itself and one other value.
    [all...]
FlattenCFG.cpp 125 PHINode *PHI = dyn_cast<PHINode>(BB->begin());
126 if (PHI)
127 return false; // For simplicity, avoid cases containing PHI nodes.
262 // PS2 should not contain PHI node.
263 PHI = dyn_cast<PHINode>(PS2->begin());
264 if (PHI)
SimplifyCFG.cpp 61 PHINodeFoldingThreshold("phi-node-folding-threshold", cl::Hidden, cl::init(2),
62 cl::desc("Control the amount of phi node folding to perform (default = 2)"));
106 // The first field contains the phi node that generates a result of the switch
108 // switch for that PHI.
164 // successor, and if that successor has a PHI node, and if *that* PHI node has
185 /// store all PHI nodes in common successors.
193 // We fold the unconditional branch if we can easily update all PHI nodes in
222 /// Update PHI nodes in Succ to indicate that there will now be entries in it
223 /// from the 'NewPred' block. The values that will be flowing into the PHI node
    [all...]
InlineFunction.cpp 73 PHINode *InnerEHValuesPHI; ///< PHI for EH values from landingpad insts.
80 // If there are PHI nodes in the unwind destination block, we need to keep
87 PHINode *PHI = cast<PHINode>(I);
88 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
112 /// Add incoming-PHI values to the unwind destination block for the given
121 PHINode *phi = cast<PHINode>(I); local
122 phi->addIncoming(UnwindDestPHIValues[i], src);
153 // Create a PHI for the exception values.
185 /// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
276 // Update any PHI nodes in the exceptional block to indicate that there
    [all...]
  /external/llvm/include/llvm/Transforms/Utils/
SSAUpdaterImpl.h 281 /// dominating definitions for non-PHI blocks.
291 // If this block already needs a PHI, there is nothing to do here.
299 // Need a PHI here.
314 /// FindAvailableVal - If this block requires a PHI, first check if an
315 /// existing PHI matches the PHI placement and reaching definitions computed
316 /// earlier, and if not, create a new PHI. Visit all the block's
318 /// the incoming values for a new PHI.
326 // Check if there needs to be a PHI in BB.
330 // Look for an existing PHI
    [all...]
  /external/chromium-trace/catapult/experimental/heatmap/
color.js 4 var PHI = (1 + Math.sqrt(5)) / 2;
26 return sinebow(n * PHI);
  /external/llvm/lib/Target/WebAssembly/
WebAssemblyRegStackify.cpp 96 if (Def->getOpcode() == TargetOpcode::PHI)
139 // Don't nest anything inside a phi.
140 if (Insert->getOpcode() == TargetOpcode::PHI)
168 // TODO: Eventually we may relax this, to stackify phi transfers.
183 if (Def->getOpcode() == TargetOpcode::PHI)
WebAssemblyStoreResults.cpp 98 if (Where->getOpcode() == TargetOpcode::PHI) {
107 // For a non-PHI, check that MI dominates the instruction in the
  /external/llvm/lib/Transforms/Scalar/
LoopInterchange.cpp 533 // Since we currently do not handle LCSSA PHI's any failure in loop
535 // TODO: This should be removed once we handle LCSSA PHI nodes.
545 DEBUG(dbgs() << "PHI Nodes in loop nest exit is not handled for now "
614 // Load corresponding to reduction PHI's are safe while concluding if
631 PHINode *PHI = dyn_cast<PHINode>(L->getOperand(0));
632 if (!PHI)
707 PHINode *PHI = cast<PHINode>(I);
708 if (InductionDescriptor::isInductionPHI(PHI, SE, ID))
709 Inductions.push_back(PHI);
710 else if (RecurrenceDescriptor::isReductionPHI(PHI, L, RD))
    [all...]
LoopLoadElimination.cpp 369 // %x.storeforward = phi [%x.initial, %ph] [%y, %loop]
381 PHINode *PHI = PHINode::Create(Initial->getType(), 2, "store_forwarded",
383 PHI->addIncoming(Initial, PH);
384 PHI->addIncoming(Cand.Store->getOperand(0), L->getLoopLatch());
386 Cand.Load->replaceAllUsesWith(PHI);
407 // %x.storeforward = phi [%x.initial, %ph] [%y, %loop]
Scalarizer.cpp 566 bool Scalarizer::visitPHINode(PHINode &PHI) {
567 VectorType *VT = dyn_cast<VectorType>(PHI.getType());
572 IRBuilder<> Builder(&PHI);
576 unsigned NumOps = PHI.getNumOperands();
579 PHI.getName() + ".i" + Twine(I));
582 Scatterer Op = scatter(&PHI, PHI.getIncomingValue(I));
583 BasicBlock *IncomingBlock = PHI.getIncomingBlock(I);
587 gather(&PHI, Res);
ConstantHoisting.cpp 229 // We can't insert directly before a phi node or an eh pad. Insert before
231 assert(Entry != Inst->getParent() && "PHI or landing pad in entry block!");
440 /// instruction Mat. If the instruction is a PHI node then special
446 if (auto PHI = dyn_cast<PHINode>(Inst)) {
447 // Check if any previous operand of the PHI node has the same incoming basic
453 BasicBlock *IncomingBB = PHI->getIncomingBlock(Idx);
455 if (PHI->getIncomingBlock(i) == IncomingBB) {
456 Value *IncomingVal = PHI->getIncomingValue(i);
LoopUnrollPass.cpp 568 auto *PHI = dyn_cast<PHINode>(&I);
569 if (!PHI)
572 // The loop header PHI nodes must have exactly two input: one from the
575 PHI->getNumIncomingValues() == 2 &&
578 Value *V = PHI->getIncomingValueForBlock(
584 SimplifiedInputValues.push_back({PHI, C});
    [all...]
  /external/llvm/lib/Transforms/ObjCARC/
ObjCARCContract.cpp 26 // TODO: ObjCARCContract could insert PHI nodes when uses aren't
573 if (PHINode *PHI = dyn_cast<PHINode>(U.getUser())) {
574 // For PHI nodes, insert the bitcast in the predecessor block.
576 BasicBlock *BB = PHI->getIncomingBlock(ValNo);
580 // While we're here, rewrite all edges for this PHI, rather
583 for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i)
584 if (PHI->getIncomingBlock(i) == BB) {
587 &PHI->getOperandUse(
590 PHI->setIncomingValue(i, Replacement);
  /device/google/contexthub/firmware/src/algos/
accel_cal.c 26 #define PHI 0.707f // = 1/sqrt(2) gives a 45 degree angle for sorting data.
59 if (PHI < asd->mean_x) {
67 if (PHI < asd->mean_y) {
83 if (PHI > asd->mean_x && PHIb < asd->mean_x &&
84 PHI > asd->mean_y && PHIb < asd->mean_y &&
311 if (PHI < asd->mean_x && ac1->agd.nx < ac1->agd.nfx) {
325 if (PHI < asd->mean_y && ac1->agd.ny < ac1->agd.nfy) {
353 if (PHI > asd->mean_x && PHIb < asd->mean_x &&
354 PHI > asd->mean_y && PHIb < asd->mean_y &&
  /external/llvm/lib/Analysis/
MemoryBuiltins.cpp 747 SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
749 PHINode *SizePHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
750 PHINode *OffsetPHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
753 CacheMap[&PHI] = std::make_pair(SizePHI, OffsetPHI);
755 // compute offset/size for each PHI incoming pointer
756 for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
757 Builder.SetInsertPoint(&*PHI.getIncomingBlock(i)->getFirstInsertionPt());
758 SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i));
767 SizePHI->addIncoming(EdgeData.first, PHI.getIncomingBlock(i));
768 OffsetPHI->addIncoming(EdgeData.second, PHI.getIncomingBlock(i))
    [all...]
IVUsers.cpp 162 // Do not infinitely recurse on PHI nodes.
169 // A phi's use is live out of its predecessor block.
170 if (PHINode *PHI = dyn_cast<PHINode>(User)) {
173 UseBB = PHI->getIncomingBlock(ValNo);
178 // Descend recursively, but not into PHI nodes outside the current loop.
276 // them by stride. Start by finding all of the PHI nodes in the header for
  /external/llvm/lib/Target/AMDGPU/
SIFixSGPRCopies.cpp 25 /// %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vrsc>, <BB#1>
40 /// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
43 /// Now that the result of the PHI instruction is an SGPR, the register
55 /// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
60 /// In order to avoid this problem, this pass searches for PHI instructions
62 /// <vgpr> if the user of the PHI's definition register is a vector instruction.
63 /// If the PHI's definition class is constrained to <vgpr> then the coalescer
274 case AMDGPU::PHI: {
275 DEBUG(dbgs() << "Fixing PHI: " << MI);
280 // If a PHI node defines an SGPR and any of its operands are VGPRs
    [all...]
  /external/llvm/include/llvm/CodeGen/
MachineTraceMetrics.h 92 /// Doesn't count PHI and COPY instructions that are likely to be removed.
213 /// This does not include PHI uses in the current block, but it does
214 /// include PHI uses in deeper blocks.
287 /// Return the Depth of a PHI instruction in a trace center block successor.
288 /// The PHI does not have to be part of the trace.
289 unsigned getPHIDepth(const MachineInstr *PHI) const;
  /external/llvm/examples/OCaml-Kaleidoscope/Chapter5/
codegen.ml 67 * phi. We create a new name because one is used for the phi node, and the
77 * phi. *)
84 let phi = build_phi incoming "iftmp" builder in var
98 phi
116 (* Start the PHI node with an entry for start. *)
119 (* Within the loop, the variable is defined equal to the PHI node. If it
159 (* Add a new entry to the PHI node for the backedge. *)

Completed in 676 milliseconds

1 2 3 4 5