Lines Matching defs:Load
57 ReMatPICStubLoad("remat-pic-stub-load",
58 cl::desc("Re-materialize load from stub in PIC mode"),
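The two lines above are part of a command-line flag declaration. A minimal sketch of how such a flag is declared with LLVM's cl::opt follows; only the flag name and description come from the listing, while the cl::init(false) default and cl::Hidden attribute are assumptions.

    #include "llvm/Support/CommandLine.h"
    using namespace llvm;

    // Sketch only: the default value and visibility are assumed, not taken from the file.
    static cl::opt<bool>
        ReMatPICStubLoad("remat-pic-stub-load",
                         cl::desc("Re-materialize load from stub in PIC mode"),
                         cl::init(false), cl::Hidden);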
83 // Minimum alignment required for load/store.
278 // Index 0, folded load and store, no alignment requirement.
869 // Index 1, folded load
969 // variants below) to allow load folding.
1279 // Do not fold VFs* loads because there are no scalar load variants for
1280 // these instructions. When folded, the load is required to be 128 bits, so
1281 // the load size would not match.
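A hedged sketch of the width check this comment implies: the VFs* pseudo instructions have no scalar load variant, so their memory form always reads a full 128 bits, and folding is only size-preserving when the load being folded is that wide. The helper name and parameter are hypothetical, not from X86InstrInfo.cpp.

    // Hypothetical helper, not the in-tree check: scalar 32/64-bit loads never
    // match the 128-bit access a folded VFs* memory form would perform.
    static bool foldPreservesLoadSize(unsigned LoadSizeInBits) {
      const unsigned VFsMemAccessBits = 128; // width the folded form would read
      return LoadSizeInBits == VFsMemAccessBits;
    }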
1736 // Index 2, folded load
1952 // Index 3, folded load
2004 // Index 4, folded load
2312 // Allow re-materialization of PIC load.
2471 // Left shift instructions can be transformed into load-effective-address
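As an illustration of that transformation, here is a self-contained sketch of the shift-amount-to-scale mapping it relies on; the helper name is hypothetical and this is not the in-tree convertToThreeAddress logic.

    #include <cassert>

    // Hypothetical helper: a left shift by 1, 2, or 3 can be re-expressed as a
    // load-effective-address whose index register is scaled by 2, 4, or 8,
    // computing the same value into a separate destination without touching EFLAGS.
    static unsigned shiftAmountToLEAScale(unsigned ShAmt) {
      assert(ShAmt >= 1 && ShAmt <= 3 && "only shifts by 1-3 fit an LEA scale");
      return 1u << ShAmt; // shl $1 -> scale 2, shl $2 -> scale 4, shl $3 -> scale 8
    }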
3881 bool load) {
3885 return load ? X86::KMOVWkm : X86::KMOVWmk;
3887 return load ? X86::VMOVSSZrm : X86::VMOVSSZmr;
3889 return load ? X86::VMOVSDZrm : X86::VMOVSDZmr;
3891 return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
3904 return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
3905 return load ? X86::MOV8rm : X86::MOV8mr;
3908 return load ? X86::MOV16rm : X86::MOV16mr;
3911 return load ? X86::MOV32rm : X86::MOV32mr;
3913 return load ?
3917 return load ? X86::LD_Fp32m : X86::ST_Fp32m;
3921 return load ? X86::MOV64rm : X86::MOV64mr;
3923 return load ?
3927 return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
3929 return load ? X86::LD_Fp64m : X86::ST_Fp64m;
3933 return load ? X86::LD_Fp80m : X86::ST_FpP80m;
3939 return load ?
3943 return load ?
3952 return load ? X86::VMOVAPSYrm : X86::VMOVAPSYmr;
3954 return load ? X86::VMOVUPSYrm : X86::VMOVUPSYmr;
3958 return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
3960 return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
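The returns above come from the opcode-selection helper that maps a spill size and direction to a load or store opcode. A simplified sketch of that pattern follows, covering integer moves only and assuming the in-tree X86 opcode enum; the real helper also keys on register classes and subtarget features (AVX, AVX-512), and the function name here is hypothetical.

    #include "X86InstrInfo.h"                    // in-tree X86 target header (opcode enum)
    #include "llvm/Support/ErrorHandling.h"      // llvm_unreachable

    // Simplified sketch of the selection pattern shown above.
    static unsigned getScalarMovOpcode(unsigned SizeInBytes, bool load) {
      switch (SizeInBytes) {
      case 1: return load ? X86::MOV8rm  : X86::MOV8mr;
      case 2: return load ? X86::MOV16rm : X86::MOV16mr;
      case 4: return load ? X86::MOV32rm : X86::MOV32mr;
      case 8: return load ? X86::MOV64rm : X86::MOV64mr;
      default: llvm_unreachable("unsupported spill size");
      }
    }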
4562 /// Try to remove the load by folding it to a register
4563 /// operand at the use. We fold the load instructions if the load defines a virtual
4565 /// instructions in-between do not load or store, and have no side effects.
4572 // To be conservative, if there exists another load, clear the load candidate.
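A minimal sketch of the per-instruction safety test this folding walk needs: an intervening instruction blocks the fold if it may load, may store, or has unmodeled side effects. The helper name is hypothetical; mayLoad, mayStore, and hasUnmodeledSideEffects are the standard MachineInstr queries.

    #include "llvm/CodeGen/MachineInstr.h"
    using namespace llvm;

    // Hypothetical helper: true if MI can neither observe nor modify memory,
    // so the candidate load may still be folded past it.
    static bool isSafeToFoldLoadAcross(const MachineInstr &MI) {
      return !MI.mayLoad() && !MI.mayStore() && !MI.hasUnmodeledSideEffects();
    }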
4835 // Check if it's safe to fold the load. If the size of the object is
4836 // narrower than the load width, then it's not.
4839 // If this is a 64-bit load, but the spill slot is 32 bits, then we can do
4840 // a 32-bit load which is implicitly zero-extended. This likely is
4841 // due to live interval analysis remat'ing a load from a stack slot.
4855 // If this is the special case where we use a MOV32rm to load a 32-bit
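A hedged sketch of the narrowing these comments describe: when a 64-bit reload is folded against a 32-bit spill slot, the load is rewritten as MOV32rm, which on x86-64 implicitly zero-extends into the full 64-bit register. The helper name is hypothetical, not the in-tree folding code.

    #include "X86InstrInfo.h" // in-tree X86 target header (opcode enum)

    // Hypothetical helper: narrow a folded 64-bit reload when the spill slot
    // only holds 32 bits; the upper half is known zero, so MOV32rm is equivalent.
    static unsigned narrowReloadOpcode(unsigned Opc, unsigned SlotSizeInBits) {
      if (Opc == X86::MOV64rm && SlotSizeInBits == 32)
        return X86::MOV32rm;
      return Opc;
    }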
4937 /// not needed. e.g. cvtss2sd, sqrtss. Unfolding the load from these
5165 // Check if it's safe to fold the load. If the size of the object is
5166 // narrower than the load width, then it's not.
5187 // These instructions only load 32 bits; we can't fold them if the
5192 // These instructions only load 64 bits; we can't fold them if the
5221 // Determine the alignment of the load.
5260 // Otherwise we risk changing the size of the load.
5272 // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
5273 // Create a constant-pool entry and operands to load from it.
5311 // Create operands to load from the constant pool entry.
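A sketch of the constant-pool step these comments outline, assuming recent LLVM APIs (FixedVectorType and the Align-based getConstantPoolIndex overload); the vector type, alignment, and function name are illustrative, not taken from the file.

    #include "llvm/CodeGen/MachineConstantPool.h"
    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/Support/Alignment.h"
    using namespace llvm;

    // Sketch: materialize an all-zeros <4 x i32> in the constant pool; the folded
    // instruction then addresses it through the usual five X86 memory operands
    // (base, scale, index, displacement, segment).
    static unsigned makeZeroVectorCPEntry(MachineFunction &MF) {
      LLVMContext &Ctx = MF.getFunction().getContext();
      Constant *Zeros =
          Constant::getNullValue(FixedVectorType::get(Type::getInt32Ty(Ctx), 4));
      return MF.getConstantPool()->getConstantPoolIndex(Zeros, Align(16));
    }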
5323 // Folding a normal load. Just copy the load's address operands.
5432 // Emit the load instruction.
5549 // Emit the load instruction.
5550 SDNode *Load = nullptr;
5560 // Do not introduce a slow unaligned load.
5565 Load
5567 NewNodes.push_back(Load);
5570 cast<MachineSDNode>(Load)->setMemRefs(MMOs.first, MMOs.second);
5585 if (Load)
5586 BeforeOps.push_back(SDValue(Load, 0));
5665 // AVX load instructions
5702 // AVX load instructions