
Lines Matching defs:Load

681     setOperationAction(ISD::LOAD, VT, Expand);
787 setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
864 // memory vector types which we can load as a scalar (or sequence of
866 // loads these must work with a single scalar load.
893 // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
907 setOperationAction(ISD::LOAD, VT, Promote);
908 AddPromotedToType (ISD::LOAD, VT, MVT::v2i64);
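The pair of calls at 907-908 is the standard promotion idiom: rather than adding separate selection patterns for every 128-bit integer vector load, the target marks those types Promote and points them at v2i64, so one legal load type covers them all. A minimal sketch of the idiom (not the file's exact loop; it lives in the X86TargetLowering constructor):

  for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) {
    // Loads of these types become a v2i64 load plus a bitcast.
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType(ISD::LOAD, VT, MVT::v2i64);
  }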
914 setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
915 setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
1030 setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
1031 setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
1032 setOperationAction(ISD::LOAD, MVT::v4i64, Legal);
1235 setOperationAction(ISD::LOAD, VT, Promote);
1236 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1260 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1261 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1262 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1263 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1264 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1428 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1429 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1546 setTargetDAGCombine(ISD::LOAD);
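The call at 1546 registers LOAD with the generic DAG combiner so it calls back into the target for every load node it revisits; the matches at 22517 and 23882 below are the receiving end. A simplified sketch of that dispatch (the real PerformDAGCombine switches over many more opcodes):

  SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
    SelectionDAG &DAG = DCI.DAG;
    switch (N->getOpcode()) {
    default: break;
    case ISD::LOAD:
      // Target-specific load combines (see PerformLOADCombine at 22517).
      return PerformLOADCombine(N, DAG, DCI, Subtarget);
    }
    return SDValue();
  }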
1684 /// Returns the target specific optimal type for load
2195 // Create the nodes corresponding to a load from this parameter slot.
2382 // If value is passed via pointer - do a load.
2640 /// Emit a load of return address if tail call
2651 // Load the "old" Return address.
2779 // Load return address for tail calls.
2871 // If we are tail calling and generating PIC/GOT style code load the
3001 // We should use extra load for direct calls to dllimported functions in
3671 // If LHS is a foldable load, but RHS is not, flip the condition.
3740 /// materialize the FP immediate as a load from a constant pool.
3749 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3753 // relocation target a movq or addq instruction: don't let the load shrink.
3754 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
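The hook at 3749 lets the target veto the generic combiner's attempt to narrow a load, and 3754 shows it inspecting the load's base pointer. Reconstructed from the matched lines, the function's shape is roughly as follows; the WrapperRIP/GOTTPOFF details are an assumption about this revision rather than a verbatim copy:

  bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
                                                ISD::LoadExtType ExtTy,
                                                EVT NewVT) const {
    // A GOTTPOFF relocation must target a full movq or addq instruction,
    // so refuse to shrink a load whose address comes from one.
    SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
    if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
      if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
        return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
    return true;
  }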
3761 /// \brief Returns true if it is beneficial to convert a load of a constant
4682 // Check if the scalar load can be widened into a vector load. And if
4752 /// elements can be replaced by a single large load which has the same value as
4755 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
4768 // For each element in the initializer, see if we've found a load or an undef.
4769 // If we don't find an initial load element, or later load elements are
4792 // requested vector load.
4801 // load of the entire vector width starting at the base pointer. If we found
4804 assert(LDBase && "Did not find base load for merging consecutive loads");
4812 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
4848 // Make sure the newly-created LOAD is in the same position as LDBase in
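The block around 4752-4848 merges consecutive element loads: when every defined element of a BUILD_VECTOR is loaded from adjacent addresses (4755 gives the zextload example), the vector is re-read with one wide load, provided ISD::LOAD is legal for the vector type (4812). A simplified sketch of the final step, with LDBase the lowest-address element load (4804) and dl its debug location; the real code is more careful about rewiring the chain, which is what 4848 is about:

  SDValue NewLd = DAG.getLoad(VT, dl, LDBase->getChain(), LDBase->getBasePtr(),
                              LDBase->getPointerInfo(), false, false, false,
                              LDBase->getAlignment());
  // Everything that was chained after LDBase must now be chained after the
  // wide load, so the new LOAD takes LDBase's position in memory order.
  DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewLd.getValue(1));
  return NewLd;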
4866 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
4868 /// a scalar load, or a constant.
4907 // Make sure that all of the users of a non-constant load are from the
4940 // load node must have exactly one user.
4958 // TODO: If multiple splats are generated to load the same constant,
4966 // On Sandybridge (no AVX2), it is still better to load a constant vector
5006 // The scalar source must be a normal load.
5682 // a constant pool load than it is to do a movd + shuffle.
5757 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
5762 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
5775 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
5787 // handled, so this is best done with a single constant-pool load.
5791 // For AVX-length vectors, see if we can use a vector load to get all of the
6413 // shuffle may be able to fold with a load or other benefit. However, when
6754 load and/or copy.
6932 /// \brief Helper to test for a load that can be folded with x86 shuffles.
6935 /// significantly based on whether the operand is a load or not.
7071 // Go up the chain of (vector) values to find a scalar load that we can
7109 // If the scalar isn't a load, we can't broadcast from it in AVX1.
7344 // If we have AVX, we can use VPERMILPS which will allow folding a load
7375 // We can either use a special instruction to load over the low double or
7653 // If we have AVX, we can use VPERMILPS which will allow folding a load
7739 // so prevents folding a load into this instruction or making a copy.
10310 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
10685 // But if optimizing for size and there's a load folding opportunity,
10793 // Fold two 16-byte subvector loads into one 32-byte load:
10794 // (insert_subvector (insert_subvector undef, (load addr), 0),
10795 // (load addr + 16), Elts/2)
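The pattern spelled out at 10793-10795 folds two stacked 128-bit subvector loads into a single 256-bit load. A rough sketch of the match with hypothetical local names (Vec is the inner insert_subvector, SubVec the upper-half operand, OpVT the 256-bit result type); the file's own index, alignment, and slow-unaligned-memory checks are omitted:

  if (auto *LdLo = dyn_cast<LoadSDNode>(Vec.getOperand(1)))
    if (auto *LdHi = dyn_cast<LoadSDNode>(SubVec))
      // LdHi must read the 16 bytes immediately after LdLo.
      if (DAG.areNonVolatileConsecutiveLoads(LdHi, LdLo, 16, 1))
        return DAG.getLoad(OpVT, dl, LdLo->getChain(), LdLo->getBasePtr(),
                           LdLo->getPointerInfo(), false, false, false,
                           LdLo->getAlignment());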
10962 // For symbols that require a load from a stub to get the address, emit the
10963 // load.
11030 // For globals that require a load from a stub to get the address, emit the
11031 // load.
11274 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
11276 // mov ecx, dword [rel _tls_index]; Load index (from C runtime)
11306 // Load the _tls_index variable
11525 // Load the 64-bit value into an XMM register.
11566 // Load the 32-bit value into an XMM register.
11567 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
11571 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
11573 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
11574 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
11577 // Or the load with the bias.
11581 MVT::v2f64, Load)),
11815 // Load the value out, extending it from f32 to f80.
11850 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
11871 // FIXME This causes a redundant load/store if the SSE-class value is already
12174 // Load the result.
12191 // Load the result.
12236 // Using a 16-byte mask allows folding the load of the mask with
12555 // selected as part of a load-modify-store instruction. When the root node
12626 // likely to be selected as part of a load-modify-store instruction.
13789 assert(MemVT.isVector() && "Must load a vector from memory");
13804 SDValue Load;
13806 // Just switch this to a normal load.
13810 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
13816 // Do an sext load to a 128-bit vector type. We want to use the same
13823 Load =
13831 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
13832 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
13835 return DAG.getSExtOrTrunc(Load, dl, RegVT);
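Lines 13804-13835 are the tail of the extended-load lowering: whatever node is built to replace the original extending load, its chain result has to be wired to the old load's chain users and the value brought back to the register type. Annotated reconstruction of that idiom, with Ld the original LoadSDNode, Load the replacement value, and RegVT the register type, as in the matches above:

  assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
  // Redirect everything that depended on the old load's chain to the new one.
  DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
  // Sign-extend or truncate the loaded value to the register type the
  // original extending load produced.
  return DAG.getSExtOrTrunc(Load, dl, RegVT);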
13842 // Attempt to load the original value using scalar loads.
13857 // in order to load our vector from memory.
13861 "Can only lower sext loads with a single scalar load!");
13868 // largest scalar that we can load.
13892 // Perform a single load.
13928 "We can't implement a sext load without an arithmetic right shift!");
14492 // Load the next argument and return it
15391 if (isAllOnes(Mask)) // return just a load
15435 // Just load the return address.
15567 // Load the pointer to the nested function into R11.
15580 // Load the 'nest' parameter value into R10.
15734 // Load FP Control Word from stack slot
16762 // Before the load we need a fence. Here is an example lifted from
16770 // r2 = x.load(relaxed);
16772 // lowered to just a load without a fence. A mfence flushes the store buffer,
16794 // Finally we can emit the atomic load.
17215 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
17313 load f16)) to FP_TO_INT*.
17328 // Return a load from the stack slot.
17676 // by AM is legal for this target, for a load/store of the specified type.
17691 // If a reference to this global requires an extra load, we can't fold it.
17801 if (Val.getOpcode() != ISD::LOAD)
18154 // Load the offset value into a register
18226 // Load the overflow_area address into a register.
18736 // our load from the relocation, sticking it in either RDI (x86-64)
18836 // if base pointer being used, load it from frame
19155 // Load the old value of the high byte of the control word...
19481 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
19494 // Make sure the newly-created LOAD is in the same position as Ld in
19573 // instructions or the ability to load as part of the instruction, so
19577 // vectors because it can have a load folded into it that UNPCK cannot. This
19588 // into even an unaligned memory load.
20334 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
20376 /// specific shuffle of a load can be folded into a single element load.
20393 // Don't duplicate a load with other uses.
20407 // Don't duplicate a load with other uses.
20429 // Don't duplicate a load with other uses.
20433 AllowedUses = 1; // only allow 1 load use if we have a bitcast
20446 // If there's a bitcast before the shuffle, check if the load type and
20453 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
20474 /// store-load conversions.
20599 // Replace each use (extract) with a load of the appropriate element.
20607 // Load the scalar.
20737 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
22517 /// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
22583 "Unexpected size for extending masked load");
22840 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
22845 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
22863 // Must be a store of a load. We currently handle two cases: the load
22882 // If this is not the MMX case, i.e. we are just turning i64 load/store
22883 // into f64 load/store, avoid the transformation if there are multiple
22890 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
22891 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
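The store combine at 22840-22891 rewrites a (store (load)) pair: loads/stores of MMX types are reissued through GPRs to avoid touching x87/MMX state, and on 32-bit SSE2 targets an i64 load->store pair becomes a single f64 load/store instead of two 32-bit GPR operations. A simplified sketch of the f64 branch, with hypothetical locals St, Ld, StDL, LdDL; the real combine also handles the 64-bit movq and MMX cases and fixes up the old load's chain users:

  if (!Subtarget->is64Bit() && Subtarget->hasSSE2()) {
    SDValue NewLd = DAG.getLoad(MVT::f64, LdDL, Ld->getChain(),
                                Ld->getBasePtr(), Ld->getPointerInfo(),
                                false, false, false, Ld->getAlignment());
    return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
                        St->getPointerInfo(), false, false,
                        St->getAlignment());
  }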
23227 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
23438 static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
23440 SDLoc dl(Load);
23441 MVT VT = Load->getSimpleValueType(0);
23443 SDValue Addr = Load->getOperand(1);
23449 DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
23451 Load->getMemOperand(), 0, EVT.getStoreSize()));
23465 // address when narrowing the vector load to a specific element.
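The fragments at 23438-23451 belong to NarrowVectorLoadToElement, which replaces a whole-vector load with a load of just the lane the surrounding combine needs; 23465 notes the address adjustment involved. Pieced together from the matched lines (the exact getConstant and getMachineMemOperand calls are an assumption for this revision, and the element-type local is renamed EltVT for readability):

  static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
                                           SelectionDAG &DAG) {
    SDLoc dl(Load);
    MVT VT = Load->getSimpleValueType(0);
    MVT EltVT = VT.getVectorElementType();
    SDValue Addr = Load->getOperand(1);
    // Step the base pointer forward to the requested element.
    SDValue NewAddr =
        DAG.getNode(ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
                    DAG.getConstant(Index * EltVT.getStoreSize(), dl,
                                    Addr.getSimpleValueType()));
    // Re-issue the access as a scalar load of that single element.
    return DAG.getLoad(EltVT, dl, Load->getChain(), NewAddr,
                       DAG.getMachineFunction().getMachineMemOperand(
                           Load->getMemOperand(), 0, EltVT.getStoreSize()));
  }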
23659 if (Op0.getOpcode() == ISD::LOAD) {
23882 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
23950 case ISD::LOAD:
23978 case ISD::LOAD: {
23980 // If the non-extending load has a single use and it's not live out, then it
23986 // The only case where we'd want to promote LOAD (rather than it being
24003 // Look out for (store (shl (load), x)).
24021 // Avoid disabling potential load
24455 // If we require an extra load to get this address, as in PIC mode, we
24719 // Requires two allocations (one for the load, one for the computation)
24727 // "load" ports instead of the dedicated "store" port.