1 //===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file defines the interfaces that X86 uses to lower LLVM code into a 11 // selection DAG. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #define DEBUG_TYPE "x86-isel" 16 #include "X86ISelLowering.h" 17 #include "X86.h" 18 #include "X86InstrBuilder.h" 19 #include "X86TargetMachine.h" 20 #include "X86TargetObjectFile.h" 21 #include "Utils/X86ShuffleDecode.h" 22 #include "llvm/CallingConv.h" 23 #include "llvm/Constants.h" 24 #include "llvm/DerivedTypes.h" 25 #include "llvm/GlobalAlias.h" 26 #include "llvm/GlobalVariable.h" 27 #include "llvm/Function.h" 28 #include "llvm/Instructions.h" 29 #include "llvm/Intrinsics.h" 30 #include "llvm/LLVMContext.h" 31 #include "llvm/CodeGen/IntrinsicLowering.h" 32 #include "llvm/CodeGen/MachineFrameInfo.h" 33 #include "llvm/CodeGen/MachineFunction.h" 34 #include "llvm/CodeGen/MachineInstrBuilder.h" 35 #include "llvm/CodeGen/MachineJumpTableInfo.h" 36 #include "llvm/CodeGen/MachineModuleInfo.h" 37 #include "llvm/CodeGen/MachineRegisterInfo.h" 38 #include "llvm/MC/MCAsmInfo.h" 39 #include "llvm/MC/MCContext.h" 40 #include "llvm/MC/MCExpr.h" 41 #include "llvm/MC/MCSymbol.h" 42 #include "llvm/ADT/SmallSet.h" 43 #include "llvm/ADT/Statistic.h" 44 #include "llvm/ADT/StringExtras.h" 45 #include "llvm/ADT/VariadicFunction.h" 46 #include "llvm/Support/CallSite.h" 47 #include "llvm/Support/Debug.h" 48 #include "llvm/Support/ErrorHandling.h" 49 #include "llvm/Support/MathExtras.h" 50 #include "llvm/Target/TargetOptions.h" 51 #include <bitset> 52 #include <cctype> 53 using namespace llvm; 54 55 STATISTIC(NumTailCalls, "Number of tail calls"); 56 57 // Forward declarations. 58 static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 59 SDValue V2); 60 61 /// Generate a DAG to grab 128-bits from a vector > 128 bits. This 62 /// sets things up to match to an AVX VEXTRACTF128 instruction or a 63 /// simple subregister reference. Idx is an index in the 128 bits we 64 /// want. It need not be aligned to a 128-bit bounday. That makes 65 /// lowering EXTRACT_VECTOR_ELT operations easier. 66 static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal, 67 SelectionDAG &DAG, DebugLoc dl) { 68 EVT VT = Vec.getValueType(); 69 assert(VT.is256BitVector() && "Unexpected vector size!"); 70 EVT ElVT = VT.getVectorElementType(); 71 unsigned Factor = VT.getSizeInBits()/128; 72 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT, 73 VT.getVectorNumElements()/Factor); 74 75 // Extract from UNDEF is UNDEF. 76 if (Vec.getOpcode() == ISD::UNDEF) 77 return DAG.getUNDEF(ResultVT); 78 79 // Extract the relevant 128 bits. Generate an EXTRACT_SUBVECTOR 80 // we can match to VEXTRACTF128. 81 unsigned ElemsPerChunk = 128 / ElVT.getSizeInBits(); 82 83 // This is the index of the first element of the 128-bit chunk 84 // we want. 85 unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128) 86 * ElemsPerChunk); 87 88 SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal); 89 SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, 90 VecIdx); 91 92 return Result; 93 } 94 95 /// Generate a DAG to put 128-bits into a vector > 128 bits. 
This 96 /// sets things up to match to an AVX VINSERTF128 instruction or a 97 /// simple superregister reference. Idx is an index in the 128 bits 98 /// we want. It need not be aligned to a 128-bit bounday. That makes 99 /// lowering INSERT_VECTOR_ELT operations easier. 100 static SDValue Insert128BitVector(SDValue Result, SDValue Vec, 101 unsigned IdxVal, SelectionDAG &DAG, 102 DebugLoc dl) { 103 // Inserting UNDEF is Result 104 if (Vec.getOpcode() == ISD::UNDEF) 105 return Result; 106 107 EVT VT = Vec.getValueType(); 108 assert(VT.is128BitVector() && "Unexpected vector size!"); 109 110 EVT ElVT = VT.getVectorElementType(); 111 EVT ResultVT = Result.getValueType(); 112 113 // Insert the relevant 128 bits. 114 unsigned ElemsPerChunk = 128/ElVT.getSizeInBits(); 115 116 // This is the index of the first element of the 128-bit chunk 117 // we want. 118 unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128) 119 * ElemsPerChunk); 120 121 SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal); 122 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, 123 VecIdx); 124 } 125 126 /// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128 127 /// instructions. This is used because creating CONCAT_VECTOR nodes of 128 /// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower 129 /// large BUILD_VECTORS. 130 static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT, 131 unsigned NumElems, SelectionDAG &DAG, 132 DebugLoc dl) { 133 SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl); 134 return Insert128BitVector(V, V2, NumElems/2, DAG, dl); 135 } 136 137 static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) { 138 const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>(); 139 bool is64Bit = Subtarget->is64Bit(); 140 141 if (Subtarget->isTargetEnvMacho()) { 142 if (is64Bit) 143 return new X86_64MachoTargetObjectFile(); 144 return new TargetLoweringObjectFileMachO(); 145 } 146 147 if (Subtarget->isTargetLinux()) 148 return new X86LinuxTargetObjectFile(); 149 if (Subtarget->isTargetELF()) 150 return new TargetLoweringObjectFileELF(); 151 if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho()) 152 return new TargetLoweringObjectFileCOFF(); 153 llvm_unreachable("unknown subtarget type"); 154 } 155 156 X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) 157 : TargetLowering(TM, createTLOF(TM)) { 158 Subtarget = &TM.getSubtarget<X86Subtarget>(); 159 X86ScalarSSEf64 = Subtarget->hasSSE2(); 160 X86ScalarSSEf32 = Subtarget->hasSSE1(); 161 X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP; 162 163 RegInfo = TM.getRegisterInfo(); 164 TD = getTargetData(); 165 166 // Set up the TargetLowering object. 167 static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }; 168 169 // X86 is weird, it always uses i8 for shift amounts and setcc results. 170 setBooleanContents(ZeroOrOneBooleanContent); 171 // X86-SSE is even stranger. It uses -1 or 0 for vector masks. 172 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); 173 174 // For 64-bit since we have so many registers use the ILP scheduler, for 175 // 32-bit code use the register pressure specific scheduling. 176 // For Atom, always use ILP scheduling. 
177 if (Subtarget->isAtom()) 178 setSchedulingPreference(Sched::ILP); 179 else if (Subtarget->is64Bit()) 180 setSchedulingPreference(Sched::ILP); 181 else 182 setSchedulingPreference(Sched::RegPressure); 183 setStackPointerRegisterToSaveRestore(X86StackPtr); 184 185 // Bypass i32 with i8 on Atom when compiling with O2 186 if (Subtarget->hasSlowDivide() && TM.getOptLevel() >= CodeGenOpt::Default) 187 addBypassSlowDivType(Type::getInt32Ty(getGlobalContext()), Type::getInt8Ty(getGlobalContext())); 188 189 if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) { 190 // Setup Windows compiler runtime calls. 191 setLibcallName(RTLIB::SDIV_I64, "_alldiv"); 192 setLibcallName(RTLIB::UDIV_I64, "_aulldiv"); 193 setLibcallName(RTLIB::SREM_I64, "_allrem"); 194 setLibcallName(RTLIB::UREM_I64, "_aullrem"); 195 setLibcallName(RTLIB::MUL_I64, "_allmul"); 196 setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall); 197 setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall); 198 setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall); 199 setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall); 200 setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall); 201 202 // The _ftol2 runtime function has an unusual calling conv, which 203 // is modeled by a special pseudo-instruction. 204 setLibcallName(RTLIB::FPTOUINT_F64_I64, 0); 205 setLibcallName(RTLIB::FPTOUINT_F32_I64, 0); 206 setLibcallName(RTLIB::FPTOUINT_F64_I32, 0); 207 setLibcallName(RTLIB::FPTOUINT_F32_I32, 0); 208 } 209 210 if (Subtarget->isTargetDarwin()) { 211 // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp. 212 setUseUnderscoreSetJmp(false); 213 setUseUnderscoreLongJmp(false); 214 } else if (Subtarget->isTargetMingw()) { 215 // MS runtime is weird: it exports _setjmp, but longjmp! 216 setUseUnderscoreSetJmp(true); 217 setUseUnderscoreLongJmp(false); 218 } else { 219 setUseUnderscoreSetJmp(true); 220 setUseUnderscoreLongJmp(true); 221 } 222 223 // Set up the register classes. 224 addRegisterClass(MVT::i8, &X86::GR8RegClass); 225 addRegisterClass(MVT::i16, &X86::GR16RegClass); 226 addRegisterClass(MVT::i32, &X86::GR32RegClass); 227 if (Subtarget->is64Bit()) 228 addRegisterClass(MVT::i64, &X86::GR64RegClass); 229 230 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); 231 232 // We don't accept any truncstore of integer registers. 233 setTruncStoreAction(MVT::i64, MVT::i32, Expand); 234 setTruncStoreAction(MVT::i64, MVT::i16, Expand); 235 setTruncStoreAction(MVT::i64, MVT::i8 , Expand); 236 setTruncStoreAction(MVT::i32, MVT::i16, Expand); 237 setTruncStoreAction(MVT::i32, MVT::i8 , Expand); 238 setTruncStoreAction(MVT::i16, MVT::i8, Expand); 239 240 // SETOEQ and SETUNE require checking two conditions. 241 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand); 242 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand); 243 setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand); 244 setCondCodeAction(ISD::SETUNE, MVT::f32, Expand); 245 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand); 246 setCondCodeAction(ISD::SETUNE, MVT::f80, Expand); 247 248 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this 249 // operation. 
250 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote); 251 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote); 252 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote); 253 254 if (Subtarget->is64Bit()) { 255 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote); 256 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom); 257 } else if (!TM.Options.UseSoftFloat) { 258 // We have an algorithm for SSE2->double, and we turn this into a 259 // 64-bit FILD followed by conditional FADD for other targets. 260 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom); 261 // We have an algorithm for SSE2, and we turn this into a 64-bit 262 // FILD for other targets. 263 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom); 264 } 265 266 // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have 267 // this operation. 268 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote); 269 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote); 270 271 if (!TM.Options.UseSoftFloat) { 272 // SSE has no i16 to fp conversion, only i32 273 if (X86ScalarSSEf32) { 274 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote); 275 // f32 and f64 cases are Legal, f80 case is not 276 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom); 277 } else { 278 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom); 279 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom); 280 } 281 } else { 282 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote); 283 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Promote); 284 } 285 286 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64 287 // are Legal, f80 is custom lowered. 288 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom); 289 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom); 290 291 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have 292 // this operation. 293 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote); 294 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote); 295 296 if (X86ScalarSSEf32) { 297 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote); 298 // f32 and f64 cases are Legal, f80 case is not 299 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom); 300 } else { 301 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom); 302 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom); 303 } 304 305 // Handle FP_TO_UINT by promoting the destination to a larger signed 306 // conversion. 307 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote); 308 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote); 309 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote); 310 311 if (Subtarget->is64Bit()) { 312 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand); 313 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote); 314 } else if (!TM.Options.UseSoftFloat) { 315 // Since AVX is a superset of SSE3, only check for SSE here. 316 if (Subtarget->hasSSE1() && !Subtarget->hasSSE3()) 317 // Expand FP_TO_UINT into a select. 318 // FIXME: We would like to use a Custom expander here eventually to do 319 // the optimal thing for SSE vs. the default expansion in the legalizer. 320 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand); 321 else 322 // With SSE3 we can use fisttpll to convert to a signed i64; without 323 // SSE, we're stuck with a fistpll. 
324 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom); 325 } 326 327 if (isTargetFTOL()) { 328 // Use the _ftol2 runtime function, which has a pseudo-instruction 329 // to handle its weird calling convention. 330 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom); 331 } 332 333 // TODO: when we have SSE, these could be more efficient, by using movd/movq. 334 if (!X86ScalarSSEf64) { 335 setOperationAction(ISD::BITCAST , MVT::f32 , Expand); 336 setOperationAction(ISD::BITCAST , MVT::i32 , Expand); 337 if (Subtarget->is64Bit()) { 338 setOperationAction(ISD::BITCAST , MVT::f64 , Expand); 339 // Without SSE, i64->f64 goes through memory. 340 setOperationAction(ISD::BITCAST , MVT::i64 , Expand); 341 } 342 } 343 344 // Scalar integer divide and remainder are lowered to use operations that 345 // produce two results, to match the available instructions. This exposes 346 // the two-result form to trivial CSE, which is able to combine x/y and x%y 347 // into a single instruction. 348 // 349 // Scalar integer multiply-high is also lowered to use two-result 350 // operations, to match the available instructions. However, plain multiply 351 // (low) operations are left as Legal, as there are single-result 352 // instructions for this in x86. Using the two-result multiply instructions 353 // when both high and low results are needed must be arranged by dagcombine. 354 for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) { 355 MVT VT = IntVTs[i]; 356 setOperationAction(ISD::MULHS, VT, Expand); 357 setOperationAction(ISD::MULHU, VT, Expand); 358 setOperationAction(ISD::SDIV, VT, Expand); 359 setOperationAction(ISD::UDIV, VT, Expand); 360 setOperationAction(ISD::SREM, VT, Expand); 361 setOperationAction(ISD::UREM, VT, Expand); 362 363 // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences. 364 setOperationAction(ISD::ADDC, VT, Custom); 365 setOperationAction(ISD::ADDE, VT, Custom); 366 setOperationAction(ISD::SUBC, VT, Custom); 367 setOperationAction(ISD::SUBE, VT, Custom); 368 } 369 370 setOperationAction(ISD::BR_JT , MVT::Other, Expand); 371 setOperationAction(ISD::BRCOND , MVT::Other, Custom); 372 setOperationAction(ISD::BR_CC , MVT::Other, Expand); 373 setOperationAction(ISD::SELECT_CC , MVT::Other, Expand); 374 if (Subtarget->is64Bit()) 375 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal); 376 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal); 377 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal); 378 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand); 379 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand); 380 setOperationAction(ISD::FREM , MVT::f32 , Expand); 381 setOperationAction(ISD::FREM , MVT::f64 , Expand); 382 setOperationAction(ISD::FREM , MVT::f80 , Expand); 383 setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom); 384 385 // Promote the i8 variants and force them on up to i32 which has a shorter 386 // encoding. 
387 setOperationAction(ISD::CTTZ , MVT::i8 , Promote); 388 AddPromotedToType (ISD::CTTZ , MVT::i8 , MVT::i32); 389 setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i8 , Promote); 390 AddPromotedToType (ISD::CTTZ_ZERO_UNDEF , MVT::i8 , MVT::i32); 391 if (Subtarget->hasBMI()) { 392 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Expand); 393 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Expand); 394 if (Subtarget->is64Bit()) 395 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); 396 } else { 397 setOperationAction(ISD::CTTZ , MVT::i16 , Custom); 398 setOperationAction(ISD::CTTZ , MVT::i32 , Custom); 399 if (Subtarget->is64Bit()) 400 setOperationAction(ISD::CTTZ , MVT::i64 , Custom); 401 } 402 403 if (Subtarget->hasLZCNT()) { 404 // When promoting the i8 variants, force them to i32 for a shorter 405 // encoding. 406 setOperationAction(ISD::CTLZ , MVT::i8 , Promote); 407 AddPromotedToType (ISD::CTLZ , MVT::i8 , MVT::i32); 408 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Promote); 409 AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32); 410 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Expand); 411 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Expand); 412 if (Subtarget->is64Bit()) 413 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand); 414 } else { 415 setOperationAction(ISD::CTLZ , MVT::i8 , Custom); 416 setOperationAction(ISD::CTLZ , MVT::i16 , Custom); 417 setOperationAction(ISD::CTLZ , MVT::i32 , Custom); 418 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom); 419 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom); 420 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom); 421 if (Subtarget->is64Bit()) { 422 setOperationAction(ISD::CTLZ , MVT::i64 , Custom); 423 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom); 424 } 425 } 426 427 if (Subtarget->hasPOPCNT()) { 428 setOperationAction(ISD::CTPOP , MVT::i8 , Promote); 429 } else { 430 setOperationAction(ISD::CTPOP , MVT::i8 , Expand); 431 setOperationAction(ISD::CTPOP , MVT::i16 , Expand); 432 setOperationAction(ISD::CTPOP , MVT::i32 , Expand); 433 if (Subtarget->is64Bit()) 434 setOperationAction(ISD::CTPOP , MVT::i64 , Expand); 435 } 436 437 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom); 438 setOperationAction(ISD::BSWAP , MVT::i16 , Expand); 439 440 // These should be promoted to a larger select which is supported. 441 setOperationAction(ISD::SELECT , MVT::i1 , Promote); 442 // X86 wants to expand cmov itself. 443 setOperationAction(ISD::SELECT , MVT::i8 , Custom); 444 setOperationAction(ISD::SELECT , MVT::i16 , Custom); 445 setOperationAction(ISD::SELECT , MVT::i32 , Custom); 446 setOperationAction(ISD::SELECT , MVT::f32 , Custom); 447 setOperationAction(ISD::SELECT , MVT::f64 , Custom); 448 setOperationAction(ISD::SELECT , MVT::f80 , Custom); 449 setOperationAction(ISD::SETCC , MVT::i8 , Custom); 450 setOperationAction(ISD::SETCC , MVT::i16 , Custom); 451 setOperationAction(ISD::SETCC , MVT::i32 , Custom); 452 setOperationAction(ISD::SETCC , MVT::f32 , Custom); 453 setOperationAction(ISD::SETCC , MVT::f64 , Custom); 454 setOperationAction(ISD::SETCC , MVT::f80 , Custom); 455 if (Subtarget->is64Bit()) { 456 setOperationAction(ISD::SELECT , MVT::i64 , Custom); 457 setOperationAction(ISD::SETCC , MVT::i64 , Custom); 458 } 459 setOperationAction(ISD::EH_RETURN , MVT::Other, Custom); 460 461 // Darwin ABI issue. 
462 setOperationAction(ISD::ConstantPool , MVT::i32 , Custom); 463 setOperationAction(ISD::JumpTable , MVT::i32 , Custom); 464 setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom); 465 setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom); 466 if (Subtarget->is64Bit()) 467 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); 468 setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom); 469 setOperationAction(ISD::BlockAddress , MVT::i32 , Custom); 470 if (Subtarget->is64Bit()) { 471 setOperationAction(ISD::ConstantPool , MVT::i64 , Custom); 472 setOperationAction(ISD::JumpTable , MVT::i64 , Custom); 473 setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom); 474 setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom); 475 setOperationAction(ISD::BlockAddress , MVT::i64 , Custom); 476 } 477 // 64-bit addm sub, shl, sra, srl (iff 32-bit x86) 478 setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom); 479 setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom); 480 setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom); 481 if (Subtarget->is64Bit()) { 482 setOperationAction(ISD::SHL_PARTS , MVT::i64 , Custom); 483 setOperationAction(ISD::SRA_PARTS , MVT::i64 , Custom); 484 setOperationAction(ISD::SRL_PARTS , MVT::i64 , Custom); 485 } 486 487 if (Subtarget->hasSSE1()) 488 setOperationAction(ISD::PREFETCH , MVT::Other, Legal); 489 490 setOperationAction(ISD::MEMBARRIER , MVT::Other, Custom); 491 setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom); 492 493 // On X86 and X86-64, atomic operations are lowered to locked instructions. 494 // Locked instructions, in turn, have implicit fence semantics (all memory 495 // operations are flushed before issuing the locked instruction, and they 496 // are not buffered), so we can fold away the common pattern of 497 // fence-atomic-fence. 
498 setShouldFoldAtomicFences(true); 499 500 // Expand certain atomics 501 for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) { 502 MVT VT = IntVTs[i]; 503 setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom); 504 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom); 505 setOperationAction(ISD::ATOMIC_STORE, VT, Custom); 506 } 507 508 if (!Subtarget->is64Bit()) { 509 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom); 510 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom); 511 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom); 512 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom); 513 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom); 514 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom); 515 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom); 516 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom); 517 } 518 519 if (Subtarget->hasCmpxchg16b()) { 520 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom); 521 } 522 523 // FIXME - use subtarget debug flags 524 if (!Subtarget->isTargetDarwin() && 525 !Subtarget->isTargetELF() && 526 !Subtarget->isTargetCygMing()) { 527 setOperationAction(ISD::EH_LABEL, MVT::Other, Expand); 528 } 529 530 setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); 531 setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); 532 setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); 533 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); 534 if (Subtarget->is64Bit()) { 535 setExceptionPointerRegister(X86::RAX); 536 setExceptionSelectorRegister(X86::RDX); 537 } else { 538 setExceptionPointerRegister(X86::EAX); 539 setExceptionSelectorRegister(X86::EDX); 540 } 541 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom); 542 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom); 543 544 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom); 545 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom); 546 547 setOperationAction(ISD::TRAP, MVT::Other, Legal); 548 549 // VASTART needs to be custom lowered to use the VarArgsFrameIndex 550 setOperationAction(ISD::VASTART , MVT::Other, Custom); 551 setOperationAction(ISD::VAEND , MVT::Other, Expand); 552 if (Subtarget->is64Bit()) { 553 setOperationAction(ISD::VAARG , MVT::Other, Custom); 554 setOperationAction(ISD::VACOPY , MVT::Other, Custom); 555 } else { 556 setOperationAction(ISD::VAARG , MVT::Other, Expand); 557 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 558 } 559 560 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 561 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 562 563 if (Subtarget->isTargetCOFF() && !Subtarget->isTargetEnvMacho()) 564 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 565 MVT::i64 : MVT::i32, Custom); 566 else if (TM.Options.EnableSegmentedStacks) 567 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 568 MVT::i64 : MVT::i32, Custom); 569 else 570 setOperationAction(ISD::DYNAMIC_STACKALLOC, Subtarget->is64Bit() ? 571 MVT::i64 : MVT::i32, Expand); 572 573 if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) { 574 // f32 and f64 use SSE. 575 // Set up the FP register classes. 576 addRegisterClass(MVT::f32, &X86::FR32RegClass); 577 addRegisterClass(MVT::f64, &X86::FR64RegClass); 578 579 // Use ANDPD to simulate FABS. 580 setOperationAction(ISD::FABS , MVT::f64, Custom); 581 setOperationAction(ISD::FABS , MVT::f32, Custom); 582 583 // Use XORP to simulate FNEG. 
584 setOperationAction(ISD::FNEG , MVT::f64, Custom); 585 setOperationAction(ISD::FNEG , MVT::f32, Custom); 586 587 // Use ANDPD and ORPD to simulate FCOPYSIGN. 588 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); 589 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 590 591 // Lower this to FGETSIGNx86 plus an AND. 592 setOperationAction(ISD::FGETSIGN, MVT::i64, Custom); 593 setOperationAction(ISD::FGETSIGN, MVT::i32, Custom); 594 595 // We don't support sin/cos/fmod 596 setOperationAction(ISD::FSIN , MVT::f64, Expand); 597 setOperationAction(ISD::FCOS , MVT::f64, Expand); 598 setOperationAction(ISD::FSIN , MVT::f32, Expand); 599 setOperationAction(ISD::FCOS , MVT::f32, Expand); 600 601 // Expand FP immediates into loads from the stack, except for the special 602 // cases we handle. 603 addLegalFPImmediate(APFloat(+0.0)); // xorpd 604 addLegalFPImmediate(APFloat(+0.0f)); // xorps 605 } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) { 606 // Use SSE for f32, x87 for f64. 607 // Set up the FP register classes. 608 addRegisterClass(MVT::f32, &X86::FR32RegClass); 609 addRegisterClass(MVT::f64, &X86::RFP64RegClass); 610 611 // Use ANDPS to simulate FABS. 612 setOperationAction(ISD::FABS , MVT::f32, Custom); 613 614 // Use XORP to simulate FNEG. 615 setOperationAction(ISD::FNEG , MVT::f32, Custom); 616 617 setOperationAction(ISD::UNDEF, MVT::f64, Expand); 618 619 // Use ANDPS and ORPS to simulate FCOPYSIGN. 620 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 621 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 622 623 // We don't support sin/cos/fmod 624 setOperationAction(ISD::FSIN , MVT::f32, Expand); 625 setOperationAction(ISD::FCOS , MVT::f32, Expand); 626 627 // Special cases we handle for FP constants. 628 addLegalFPImmediate(APFloat(+0.0f)); // xorps 629 addLegalFPImmediate(APFloat(+0.0)); // FLD0 630 addLegalFPImmediate(APFloat(+1.0)); // FLD1 631 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS 632 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS 633 634 if (!TM.Options.UnsafeFPMath) { 635 setOperationAction(ISD::FSIN , MVT::f64 , Expand); 636 setOperationAction(ISD::FCOS , MVT::f64 , Expand); 637 } 638 } else if (!TM.Options.UseSoftFloat) { 639 // f32 and f64 in x87. 640 // Set up the FP register classes. 641 addRegisterClass(MVT::f64, &X86::RFP64RegClass); 642 addRegisterClass(MVT::f32, &X86::RFP32RegClass); 643 644 setOperationAction(ISD::UNDEF, MVT::f64, Expand); 645 setOperationAction(ISD::UNDEF, MVT::f32, Expand); 646 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 647 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 648 649 if (!TM.Options.UnsafeFPMath) { 650 setOperationAction(ISD::FSIN , MVT::f64 , Expand); 651 setOperationAction(ISD::FCOS , MVT::f64 , Expand); 652 } 653 addLegalFPImmediate(APFloat(+0.0)); // FLD0 654 addLegalFPImmediate(APFloat(+1.0)); // FLD1 655 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS 656 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS 657 addLegalFPImmediate(APFloat(+0.0f)); // FLD0 658 addLegalFPImmediate(APFloat(+1.0f)); // FLD1 659 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS 660 addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS 661 } 662 663 // We don't support FMA. 664 setOperationAction(ISD::FMA, MVT::f64, Expand); 665 setOperationAction(ISD::FMA, MVT::f32, Expand); 666 667 // Long double always uses X87. 
668 if (!TM.Options.UseSoftFloat) { 669 addRegisterClass(MVT::f80, &X86::RFP80RegClass); 670 setOperationAction(ISD::UNDEF, MVT::f80, Expand); 671 setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand); 672 { 673 APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended); 674 addLegalFPImmediate(TmpFlt); // FLD0 675 TmpFlt.changeSign(); 676 addLegalFPImmediate(TmpFlt); // FLD0/FCHS 677 678 bool ignored; 679 APFloat TmpFlt2(+1.0); 680 TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven, 681 &ignored); 682 addLegalFPImmediate(TmpFlt2); // FLD1 683 TmpFlt2.changeSign(); 684 addLegalFPImmediate(TmpFlt2); // FLD1/FCHS 685 } 686 687 if (!TM.Options.UnsafeFPMath) { 688 setOperationAction(ISD::FSIN , MVT::f80 , Expand); 689 setOperationAction(ISD::FCOS , MVT::f80 , Expand); 690 } 691 692 setOperationAction(ISD::FFLOOR, MVT::f80, Expand); 693 setOperationAction(ISD::FCEIL, MVT::f80, Expand); 694 setOperationAction(ISD::FTRUNC, MVT::f80, Expand); 695 setOperationAction(ISD::FRINT, MVT::f80, Expand); 696 setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand); 697 setOperationAction(ISD::FMA, MVT::f80, Expand); 698 } 699 700 // Always use a library call for pow. 701 setOperationAction(ISD::FPOW , MVT::f32 , Expand); 702 setOperationAction(ISD::FPOW , MVT::f64 , Expand); 703 setOperationAction(ISD::FPOW , MVT::f80 , Expand); 704 705 setOperationAction(ISD::FLOG, MVT::f80, Expand); 706 setOperationAction(ISD::FLOG2, MVT::f80, Expand); 707 setOperationAction(ISD::FLOG10, MVT::f80, Expand); 708 setOperationAction(ISD::FEXP, MVT::f80, Expand); 709 setOperationAction(ISD::FEXP2, MVT::f80, Expand); 710 711 // First set operation action for all vector types to either promote 712 // (for widening) or expand (for scalarization). Then we will selectively 713 // turn on ones that can be effectively codegen'd. 
714 for (int VT = MVT::FIRST_VECTOR_VALUETYPE; 715 VT <= MVT::LAST_VECTOR_VALUETYPE; ++VT) { 716 setOperationAction(ISD::ADD , (MVT::SimpleValueType)VT, Expand); 717 setOperationAction(ISD::SUB , (MVT::SimpleValueType)VT, Expand); 718 setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand); 719 setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand); 720 setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand); 721 setOperationAction(ISD::MUL , (MVT::SimpleValueType)VT, Expand); 722 setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand); 723 setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand); 724 setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand); 725 setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand); 726 setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand); 727 setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand); 728 setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand); 729 setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand); 730 setOperationAction(ISD::EXTRACT_VECTOR_ELT,(MVT::SimpleValueType)VT,Expand); 731 setOperationAction(ISD::INSERT_VECTOR_ELT,(MVT::SimpleValueType)VT, Expand); 732 setOperationAction(ISD::EXTRACT_SUBVECTOR,(MVT::SimpleValueType)VT,Expand); 733 setOperationAction(ISD::INSERT_SUBVECTOR,(MVT::SimpleValueType)VT,Expand); 734 setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand); 735 setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand); 736 setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand); 737 setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand); 738 setOperationAction(ISD::FMA, (MVT::SimpleValueType)VT, Expand); 739 setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand); 740 setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand); 741 setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand); 742 setOperationAction(ISD::FFLOOR, (MVT::SimpleValueType)VT, Expand); 743 setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand); 744 setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand); 745 setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand); 746 setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand); 747 setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand); 748 setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand); 749 setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand); 750 setOperationAction(ISD::CTTZ_ZERO_UNDEF, (MVT::SimpleValueType)VT, Expand); 751 setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand); 752 setOperationAction(ISD::CTLZ_ZERO_UNDEF, (MVT::SimpleValueType)VT, Expand); 753 setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand); 754 setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand); 755 setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand); 756 setOperationAction(ISD::ROTL, (MVT::SimpleValueType)VT, Expand); 757 setOperationAction(ISD::ROTR, (MVT::SimpleValueType)VT, Expand); 758 setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand); 759 setOperationAction(ISD::SETCC, (MVT::SimpleValueType)VT, Expand); 760 setOperationAction(ISD::FLOG, (MVT::SimpleValueType)VT, Expand); 761 setOperationAction(ISD::FLOG2, (MVT::SimpleValueType)VT, Expand); 762 setOperationAction(ISD::FLOG10, (MVT::SimpleValueType)VT, Expand); 763 setOperationAction(ISD::FEXP, (MVT::SimpleValueType)VT, Expand); 764 setOperationAction(ISD::FEXP2, 
(MVT::SimpleValueType)VT, Expand); 765 setOperationAction(ISD::FP_TO_UINT, (MVT::SimpleValueType)VT, Expand); 766 setOperationAction(ISD::FP_TO_SINT, (MVT::SimpleValueType)VT, Expand); 767 setOperationAction(ISD::UINT_TO_FP, (MVT::SimpleValueType)VT, Expand); 768 setOperationAction(ISD::SINT_TO_FP, (MVT::SimpleValueType)VT, Expand); 769 setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT,Expand); 770 setOperationAction(ISD::TRUNCATE, (MVT::SimpleValueType)VT, Expand); 771 setOperationAction(ISD::SIGN_EXTEND, (MVT::SimpleValueType)VT, Expand); 772 setOperationAction(ISD::ZERO_EXTEND, (MVT::SimpleValueType)VT, Expand); 773 setOperationAction(ISD::ANY_EXTEND, (MVT::SimpleValueType)VT, Expand); 774 setOperationAction(ISD::VSELECT, (MVT::SimpleValueType)VT, Expand); 775 for (int InnerVT = MVT::FIRST_VECTOR_VALUETYPE; 776 InnerVT <= MVT::LAST_VECTOR_VALUETYPE; ++InnerVT) 777 setTruncStoreAction((MVT::SimpleValueType)VT, 778 (MVT::SimpleValueType)InnerVT, Expand); 779 setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand); 780 setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand); 781 setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand); 782 } 783 784 // FIXME: In order to prevent SSE instructions being expanded to MMX ones 785 // with -msoft-float, disable use of MMX as well. 786 if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) { 787 addRegisterClass(MVT::x86mmx, &X86::VR64RegClass); 788 // No operations on x86mmx supported, everything uses intrinsics. 789 } 790 791 // MMX-sized vectors (other than x86mmx) are expected to be expanded 792 // into smaller operations. 793 setOperationAction(ISD::MULHS, MVT::v8i8, Expand); 794 setOperationAction(ISD::MULHS, MVT::v4i16, Expand); 795 setOperationAction(ISD::MULHS, MVT::v2i32, Expand); 796 setOperationAction(ISD::MULHS, MVT::v1i64, Expand); 797 setOperationAction(ISD::AND, MVT::v8i8, Expand); 798 setOperationAction(ISD::AND, MVT::v4i16, Expand); 799 setOperationAction(ISD::AND, MVT::v2i32, Expand); 800 setOperationAction(ISD::AND, MVT::v1i64, Expand); 801 setOperationAction(ISD::OR, MVT::v8i8, Expand); 802 setOperationAction(ISD::OR, MVT::v4i16, Expand); 803 setOperationAction(ISD::OR, MVT::v2i32, Expand); 804 setOperationAction(ISD::OR, MVT::v1i64, Expand); 805 setOperationAction(ISD::XOR, MVT::v8i8, Expand); 806 setOperationAction(ISD::XOR, MVT::v4i16, Expand); 807 setOperationAction(ISD::XOR, MVT::v2i32, Expand); 808 setOperationAction(ISD::XOR, MVT::v1i64, Expand); 809 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand); 810 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand); 811 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand); 812 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand); 813 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand); 814 setOperationAction(ISD::SELECT, MVT::v8i8, Expand); 815 setOperationAction(ISD::SELECT, MVT::v4i16, Expand); 816 setOperationAction(ISD::SELECT, MVT::v2i32, Expand); 817 setOperationAction(ISD::SELECT, MVT::v1i64, Expand); 818 setOperationAction(ISD::BITCAST, MVT::v8i8, Expand); 819 setOperationAction(ISD::BITCAST, MVT::v4i16, Expand); 820 setOperationAction(ISD::BITCAST, MVT::v2i32, Expand); 821 setOperationAction(ISD::BITCAST, MVT::v1i64, Expand); 822 823 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) { 824 addRegisterClass(MVT::v4f32, &X86::VR128RegClass); 825 826 setOperationAction(ISD::FADD, MVT::v4f32, Legal); 827 setOperationAction(ISD::FSUB, MVT::v4f32, Legal); 828 
setOperationAction(ISD::FMUL, MVT::v4f32, Legal); 829 setOperationAction(ISD::FDIV, MVT::v4f32, Legal); 830 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); 831 setOperationAction(ISD::FNEG, MVT::v4f32, Custom); 832 setOperationAction(ISD::FABS, MVT::v4f32, Custom); 833 setOperationAction(ISD::LOAD, MVT::v4f32, Legal); 834 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 835 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom); 836 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); 837 setOperationAction(ISD::SELECT, MVT::v4f32, Custom); 838 } 839 840 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) { 841 addRegisterClass(MVT::v2f64, &X86::VR128RegClass); 842 843 // FIXME: Unfortunately -soft-float and -no-implicit-float means XMM 844 // registers cannot be used even for integer operations. 845 addRegisterClass(MVT::v16i8, &X86::VR128RegClass); 846 addRegisterClass(MVT::v8i16, &X86::VR128RegClass); 847 addRegisterClass(MVT::v4i32, &X86::VR128RegClass); 848 addRegisterClass(MVT::v2i64, &X86::VR128RegClass); 849 850 setOperationAction(ISD::ADD, MVT::v16i8, Legal); 851 setOperationAction(ISD::ADD, MVT::v8i16, Legal); 852 setOperationAction(ISD::ADD, MVT::v4i32, Legal); 853 setOperationAction(ISD::ADD, MVT::v2i64, Legal); 854 setOperationAction(ISD::MUL, MVT::v2i64, Custom); 855 setOperationAction(ISD::SUB, MVT::v16i8, Legal); 856 setOperationAction(ISD::SUB, MVT::v8i16, Legal); 857 setOperationAction(ISD::SUB, MVT::v4i32, Legal); 858 setOperationAction(ISD::SUB, MVT::v2i64, Legal); 859 setOperationAction(ISD::MUL, MVT::v8i16, Legal); 860 setOperationAction(ISD::FADD, MVT::v2f64, Legal); 861 setOperationAction(ISD::FSUB, MVT::v2f64, Legal); 862 setOperationAction(ISD::FMUL, MVT::v2f64, Legal); 863 setOperationAction(ISD::FDIV, MVT::v2f64, Legal); 864 setOperationAction(ISD::FSQRT, MVT::v2f64, Legal); 865 setOperationAction(ISD::FNEG, MVT::v2f64, Custom); 866 setOperationAction(ISD::FABS, MVT::v2f64, Custom); 867 868 setOperationAction(ISD::SETCC, MVT::v2i64, Custom); 869 setOperationAction(ISD::SETCC, MVT::v16i8, Custom); 870 setOperationAction(ISD::SETCC, MVT::v8i16, Custom); 871 setOperationAction(ISD::SETCC, MVT::v4i32, Custom); 872 873 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom); 874 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom); 875 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); 876 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); 877 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); 878 879 // Custom lower build_vector, vector_shuffle, and extract_vector_elt. 
880 for (int i = MVT::v16i8; i != MVT::v2i64; ++i) { 881 MVT VT = (MVT::SimpleValueType)i; 882 // Do not attempt to custom lower non-power-of-2 vectors 883 if (!isPowerOf2_32(VT.getVectorNumElements())) 884 continue; 885 // Do not attempt to custom lower non-128-bit vectors 886 if (!VT.is128BitVector()) 887 continue; 888 setOperationAction(ISD::BUILD_VECTOR, VT, Custom); 889 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); 890 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); 891 } 892 893 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom); 894 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom); 895 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom); 896 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom); 897 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom); 898 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom); 899 900 if (Subtarget->is64Bit()) { 901 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom); 902 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom); 903 } 904 905 // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64. 906 for (int i = MVT::v16i8; i != MVT::v2i64; ++i) { 907 MVT VT = (MVT::SimpleValueType)i; 908 909 // Do not attempt to promote non-128-bit vectors 910 if (!VT.is128BitVector()) 911 continue; 912 913 setOperationAction(ISD::AND, VT, Promote); 914 AddPromotedToType (ISD::AND, VT, MVT::v2i64); 915 setOperationAction(ISD::OR, VT, Promote); 916 AddPromotedToType (ISD::OR, VT, MVT::v2i64); 917 setOperationAction(ISD::XOR, VT, Promote); 918 AddPromotedToType (ISD::XOR, VT, MVT::v2i64); 919 setOperationAction(ISD::LOAD, VT, Promote); 920 AddPromotedToType (ISD::LOAD, VT, MVT::v2i64); 921 setOperationAction(ISD::SELECT, VT, Promote); 922 AddPromotedToType (ISD::SELECT, VT, MVT::v2i64); 923 } 924 925 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 926 927 // Custom lower v2i64 and v2f64 selects. 928 setOperationAction(ISD::LOAD, MVT::v2f64, Legal); 929 setOperationAction(ISD::LOAD, MVT::v2i64, Legal); 930 setOperationAction(ISD::SELECT, MVT::v2f64, Custom); 931 setOperationAction(ISD::SELECT, MVT::v2i64, Custom); 932 933 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); 934 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); 935 936 setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal); 937 } 938 939 if (Subtarget->hasSSE41()) { 940 setOperationAction(ISD::FFLOOR, MVT::f32, Legal); 941 setOperationAction(ISD::FCEIL, MVT::f32, Legal); 942 setOperationAction(ISD::FTRUNC, MVT::f32, Legal); 943 setOperationAction(ISD::FRINT, MVT::f32, Legal); 944 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal); 945 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 946 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 947 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 948 setOperationAction(ISD::FRINT, MVT::f64, Legal); 949 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); 950 951 setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal); 952 setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal); 953 954 // FIXME: Do we need to handle scalar-to-vector here? 
955 setOperationAction(ISD::MUL, MVT::v4i32, Legal); 956 957 setOperationAction(ISD::VSELECT, MVT::v2f64, Legal); 958 setOperationAction(ISD::VSELECT, MVT::v2i64, Legal); 959 setOperationAction(ISD::VSELECT, MVT::v16i8, Legal); 960 setOperationAction(ISD::VSELECT, MVT::v4i32, Legal); 961 setOperationAction(ISD::VSELECT, MVT::v4f32, Legal); 962 963 // i8 and i16 vectors are custom , because the source register and source 964 // source memory operand types are not the same width. f32 vectors are 965 // custom since the immediate controlling the insert encodes additional 966 // information. 967 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom); 968 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom); 969 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom); 970 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom); 971 972 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom); 973 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom); 974 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom); 975 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom); 976 977 // FIXME: these should be Legal but thats only for the case where 978 // the index is constant. For now custom expand to deal with that. 979 if (Subtarget->is64Bit()) { 980 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom); 981 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom); 982 } 983 } 984 985 if (Subtarget->hasSSE2()) { 986 setOperationAction(ISD::SRL, MVT::v8i16, Custom); 987 setOperationAction(ISD::SRL, MVT::v16i8, Custom); 988 989 setOperationAction(ISD::SHL, MVT::v8i16, Custom); 990 setOperationAction(ISD::SHL, MVT::v16i8, Custom); 991 992 setOperationAction(ISD::SRA, MVT::v8i16, Custom); 993 setOperationAction(ISD::SRA, MVT::v16i8, Custom); 994 995 if (Subtarget->hasAVX2()) { 996 setOperationAction(ISD::SRL, MVT::v2i64, Legal); 997 setOperationAction(ISD::SRL, MVT::v4i32, Legal); 998 999 setOperationAction(ISD::SHL, MVT::v2i64, Legal); 1000 setOperationAction(ISD::SHL, MVT::v4i32, Legal); 1001 1002 setOperationAction(ISD::SRA, MVT::v4i32, Legal); 1003 } else { 1004 setOperationAction(ISD::SRL, MVT::v2i64, Custom); 1005 setOperationAction(ISD::SRL, MVT::v4i32, Custom); 1006 1007 setOperationAction(ISD::SHL, MVT::v2i64, Custom); 1008 setOperationAction(ISD::SHL, MVT::v4i32, Custom); 1009 1010 setOperationAction(ISD::SRA, MVT::v4i32, Custom); 1011 } 1012 } 1013 1014 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX()) { 1015 addRegisterClass(MVT::v32i8, &X86::VR256RegClass); 1016 addRegisterClass(MVT::v16i16, &X86::VR256RegClass); 1017 addRegisterClass(MVT::v8i32, &X86::VR256RegClass); 1018 addRegisterClass(MVT::v8f32, &X86::VR256RegClass); 1019 addRegisterClass(MVT::v4i64, &X86::VR256RegClass); 1020 addRegisterClass(MVT::v4f64, &X86::VR256RegClass); 1021 1022 setOperationAction(ISD::LOAD, MVT::v8f32, Legal); 1023 setOperationAction(ISD::LOAD, MVT::v4f64, Legal); 1024 setOperationAction(ISD::LOAD, MVT::v4i64, Legal); 1025 1026 setOperationAction(ISD::FADD, MVT::v8f32, Legal); 1027 setOperationAction(ISD::FSUB, MVT::v8f32, Legal); 1028 setOperationAction(ISD::FMUL, MVT::v8f32, Legal); 1029 setOperationAction(ISD::FDIV, MVT::v8f32, Legal); 1030 setOperationAction(ISD::FSQRT, MVT::v8f32, Legal); 1031 setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal); 1032 setOperationAction(ISD::FNEG, MVT::v8f32, Custom); 1033 setOperationAction(ISD::FABS, MVT::v8f32, Custom); 1034 1035 setOperationAction(ISD::FADD, MVT::v4f64, Legal); 1036 
setOperationAction(ISD::FSUB, MVT::v4f64, Legal); 1037 setOperationAction(ISD::FMUL, MVT::v4f64, Legal); 1038 setOperationAction(ISD::FDIV, MVT::v4f64, Legal); 1039 setOperationAction(ISD::FSQRT, MVT::v4f64, Legal); 1040 setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal); 1041 setOperationAction(ISD::FNEG, MVT::v4f64, Custom); 1042 setOperationAction(ISD::FABS, MVT::v4f64, Custom); 1043 1044 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal); 1045 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal); 1046 setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal); 1047 1048 setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, Legal); 1049 1050 setOperationAction(ISD::SRL, MVT::v16i16, Custom); 1051 setOperationAction(ISD::SRL, MVT::v32i8, Custom); 1052 1053 setOperationAction(ISD::SHL, MVT::v16i16, Custom); 1054 setOperationAction(ISD::SHL, MVT::v32i8, Custom); 1055 1056 setOperationAction(ISD::SRA, MVT::v16i16, Custom); 1057 setOperationAction(ISD::SRA, MVT::v32i8, Custom); 1058 1059 setOperationAction(ISD::SETCC, MVT::v32i8, Custom); 1060 setOperationAction(ISD::SETCC, MVT::v16i16, Custom); 1061 setOperationAction(ISD::SETCC, MVT::v8i32, Custom); 1062 setOperationAction(ISD::SETCC, MVT::v4i64, Custom); 1063 1064 setOperationAction(ISD::SELECT, MVT::v4f64, Custom); 1065 setOperationAction(ISD::SELECT, MVT::v4i64, Custom); 1066 setOperationAction(ISD::SELECT, MVT::v8f32, Custom); 1067 1068 setOperationAction(ISD::VSELECT, MVT::v4f64, Legal); 1069 setOperationAction(ISD::VSELECT, MVT::v4i64, Legal); 1070 setOperationAction(ISD::VSELECT, MVT::v8i32, Legal); 1071 setOperationAction(ISD::VSELECT, MVT::v8f32, Legal); 1072 1073 if (Subtarget->hasFMA() || Subtarget->hasFMA4()) { 1074 setOperationAction(ISD::FMA, MVT::v8f32, Custom); 1075 setOperationAction(ISD::FMA, MVT::v4f64, Custom); 1076 setOperationAction(ISD::FMA, MVT::v4f32, Custom); 1077 setOperationAction(ISD::FMA, MVT::v2f64, Custom); 1078 setOperationAction(ISD::FMA, MVT::f32, Custom); 1079 setOperationAction(ISD::FMA, MVT::f64, Custom); 1080 } 1081 1082 if (Subtarget->hasAVX2()) { 1083 setOperationAction(ISD::ADD, MVT::v4i64, Legal); 1084 setOperationAction(ISD::ADD, MVT::v8i32, Legal); 1085 setOperationAction(ISD::ADD, MVT::v16i16, Legal); 1086 setOperationAction(ISD::ADD, MVT::v32i8, Legal); 1087 1088 setOperationAction(ISD::SUB, MVT::v4i64, Legal); 1089 setOperationAction(ISD::SUB, MVT::v8i32, Legal); 1090 setOperationAction(ISD::SUB, MVT::v16i16, Legal); 1091 setOperationAction(ISD::SUB, MVT::v32i8, Legal); 1092 1093 setOperationAction(ISD::MUL, MVT::v4i64, Custom); 1094 setOperationAction(ISD::MUL, MVT::v8i32, Legal); 1095 setOperationAction(ISD::MUL, MVT::v16i16, Legal); 1096 // Don't lower v32i8 because there is no 128-bit byte mul 1097 1098 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal); 1099 1100 setOperationAction(ISD::SRL, MVT::v4i64, Legal); 1101 setOperationAction(ISD::SRL, MVT::v8i32, Legal); 1102 1103 setOperationAction(ISD::SHL, MVT::v4i64, Legal); 1104 setOperationAction(ISD::SHL, MVT::v8i32, Legal); 1105 1106 setOperationAction(ISD::SRA, MVT::v8i32, Legal); 1107 } else { 1108 setOperationAction(ISD::ADD, MVT::v4i64, Custom); 1109 setOperationAction(ISD::ADD, MVT::v8i32, Custom); 1110 setOperationAction(ISD::ADD, MVT::v16i16, Custom); 1111 setOperationAction(ISD::ADD, MVT::v32i8, Custom); 1112 1113 setOperationAction(ISD::SUB, MVT::v4i64, Custom); 1114 setOperationAction(ISD::SUB, MVT::v8i32, Custom); 1115 setOperationAction(ISD::SUB, MVT::v16i16, Custom); 1116 setOperationAction(ISD::SUB, MVT::v32i8, Custom); 1117 1118 
setOperationAction(ISD::MUL, MVT::v4i64, Custom); 1119 setOperationAction(ISD::MUL, MVT::v8i32, Custom); 1120 setOperationAction(ISD::MUL, MVT::v16i16, Custom); 1121 // Don't lower v32i8 because there is no 128-bit byte mul 1122 1123 setOperationAction(ISD::SRL, MVT::v4i64, Custom); 1124 setOperationAction(ISD::SRL, MVT::v8i32, Custom); 1125 1126 setOperationAction(ISD::SHL, MVT::v4i64, Custom); 1127 setOperationAction(ISD::SHL, MVT::v8i32, Custom); 1128 1129 setOperationAction(ISD::SRA, MVT::v8i32, Custom); 1130 } 1131 1132 // Custom lower several nodes for 256-bit types. 1133 for (int i = MVT::FIRST_VECTOR_VALUETYPE; 1134 i <= MVT::LAST_VECTOR_VALUETYPE; ++i) { 1135 MVT VT = (MVT::SimpleValueType)i; 1136 1137 // Extract subvector is special because the value type 1138 // (result) is 128-bit but the source is 256-bit wide. 1139 if (VT.is128BitVector()) 1140 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); 1141 1142 // Do not attempt to custom lower other non-256-bit vectors 1143 if (!VT.is256BitVector()) 1144 continue; 1145 1146 setOperationAction(ISD::BUILD_VECTOR, VT, Custom); 1147 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); 1148 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); 1149 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); 1150 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); 1151 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); 1152 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); 1153 } 1154 1155 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64. 1156 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) { 1157 MVT VT = (MVT::SimpleValueType)i; 1158 1159 // Do not attempt to promote non-256-bit vectors 1160 if (!VT.is256BitVector()) 1161 continue; 1162 1163 setOperationAction(ISD::AND, VT, Promote); 1164 AddPromotedToType (ISD::AND, VT, MVT::v4i64); 1165 setOperationAction(ISD::OR, VT, Promote); 1166 AddPromotedToType (ISD::OR, VT, MVT::v4i64); 1167 setOperationAction(ISD::XOR, VT, Promote); 1168 AddPromotedToType (ISD::XOR, VT, MVT::v4i64); 1169 setOperationAction(ISD::LOAD, VT, Promote); 1170 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64); 1171 setOperationAction(ISD::SELECT, VT, Promote); 1172 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64); 1173 } 1174 } 1175 1176 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion 1177 // of this type with custom code. 1178 for (int VT = MVT::FIRST_VECTOR_VALUETYPE; 1179 VT != MVT::LAST_VECTOR_VALUETYPE; VT++) { 1180 setOperationAction(ISD::SIGN_EXTEND_INREG, (MVT::SimpleValueType)VT, 1181 Custom); 1182 } 1183 1184 // We want to custom lower some of our intrinsics. 1185 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 1186 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); 1187 1188 1189 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't 1190 // handle type legalization for these operations here. 1191 // 1192 // FIXME: We really should do custom legalization for addition and 1193 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better 1194 // than generic legalization for 64-bit multiplication-with-overflow, though. 1195 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) { 1196 // Add/Sub/Mul with overflow operations are custom lowered. 
1197 MVT VT = IntVTs[i]; 1198 setOperationAction(ISD::SADDO, VT, Custom); 1199 setOperationAction(ISD::UADDO, VT, Custom); 1200 setOperationAction(ISD::SSUBO, VT, Custom); 1201 setOperationAction(ISD::USUBO, VT, Custom); 1202 setOperationAction(ISD::SMULO, VT, Custom); 1203 setOperationAction(ISD::UMULO, VT, Custom); 1204 } 1205 1206 // There are no 8-bit 3-address imul/mul instructions 1207 setOperationAction(ISD::SMULO, MVT::i8, Expand); 1208 setOperationAction(ISD::UMULO, MVT::i8, Expand); 1209 1210 if (!Subtarget->is64Bit()) { 1211 // These libcalls are not available in 32-bit. 1212 setLibcallName(RTLIB::SHL_I128, 0); 1213 setLibcallName(RTLIB::SRL_I128, 0); 1214 setLibcallName(RTLIB::SRA_I128, 0); 1215 } 1216 1217 // We have target-specific dag combine patterns for the following nodes: 1218 setTargetDAGCombine(ISD::VECTOR_SHUFFLE); 1219 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); 1220 setTargetDAGCombine(ISD::VSELECT); 1221 setTargetDAGCombine(ISD::SELECT); 1222 setTargetDAGCombine(ISD::SHL); 1223 setTargetDAGCombine(ISD::SRA); 1224 setTargetDAGCombine(ISD::SRL); 1225 setTargetDAGCombine(ISD::OR); 1226 setTargetDAGCombine(ISD::AND); 1227 setTargetDAGCombine(ISD::ADD); 1228 setTargetDAGCombine(ISD::FADD); 1229 setTargetDAGCombine(ISD::FSUB); 1230 setTargetDAGCombine(ISD::FMA); 1231 setTargetDAGCombine(ISD::SUB); 1232 setTargetDAGCombine(ISD::LOAD); 1233 setTargetDAGCombine(ISD::STORE); 1234 setTargetDAGCombine(ISD::ZERO_EXTEND); 1235 setTargetDAGCombine(ISD::ANY_EXTEND); 1236 setTargetDAGCombine(ISD::SIGN_EXTEND); 1237 setTargetDAGCombine(ISD::TRUNCATE); 1238 setTargetDAGCombine(ISD::UINT_TO_FP); 1239 setTargetDAGCombine(ISD::SINT_TO_FP); 1240 setTargetDAGCombine(ISD::SETCC); 1241 setTargetDAGCombine(ISD::FP_TO_SINT); 1242 if (Subtarget->is64Bit()) 1243 setTargetDAGCombine(ISD::MUL); 1244 setTargetDAGCombine(ISD::XOR); 1245 1246 computeRegisterProperties(); 1247 1248 // On Darwin, -Os means optimize for size without hurting performance, 1249 // do not reduce the limit. 1250 maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores 1251 maxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8; 1252 maxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores 1253 maxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4; 1254 maxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores 1255 maxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4; 1256 setPrefLoopAlignment(4); // 2^4 bytes. 1257 benefitFromCodePlacementOpt = true; 1258 1259 // Predictable cmov don't hurt on atom because it's in-order. 1260 predictableSelectIsExpensive = !Subtarget->isAtom(); 1261 1262 setPrefFunctionAlignment(4); // 2^4 bytes. 1263 } 1264 1265 1266 EVT X86TargetLowering::getSetCCResultType(EVT VT) const { 1267 if (!VT.isVector()) return MVT::i8; 1268 return VT.changeVectorElementTypeToInteger(); 1269 } 1270 1271 1272 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine 1273 /// the desired ByVal argument alignment. 
1274 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) { 1275 if (MaxAlign == 16) 1276 return; 1277 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) { 1278 if (VTy->getBitWidth() == 128) 1279 MaxAlign = 16; 1280 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { 1281 unsigned EltAlign = 0; 1282 getMaxByValAlign(ATy->getElementType(), EltAlign); 1283 if (EltAlign > MaxAlign) 1284 MaxAlign = EltAlign; 1285 } else if (StructType *STy = dyn_cast<StructType>(Ty)) { 1286 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1287 unsigned EltAlign = 0; 1288 getMaxByValAlign(STy->getElementType(i), EltAlign); 1289 if (EltAlign > MaxAlign) 1290 MaxAlign = EltAlign; 1291 if (MaxAlign == 16) 1292 break; 1293 } 1294 } 1295 } 1296 1297 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 1298 /// function arguments in the caller parameter area. For X86, aggregates 1299 /// that contain SSE vectors are placed at 16-byte boundaries while the rest 1300 /// are at 4-byte boundaries. 1301 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const { 1302 if (Subtarget->is64Bit()) { 1303 // Max of 8 and alignment of type. 1304 unsigned TyAlign = TD->getABITypeAlignment(Ty); 1305 if (TyAlign > 8) 1306 return TyAlign; 1307 return 8; 1308 } 1309 1310 unsigned Align = 4; 1311 if (Subtarget->hasSSE1()) 1312 getMaxByValAlign(Ty, Align); 1313 return Align; 1314 } 1315 1316 /// getOptimalMemOpType - Returns the target specific optimal type for load 1317 /// and store operations as a result of memset, memcpy, and memmove 1318 /// lowering. If DstAlign is zero that means it's safe to destination 1319 /// alignment can satisfy any constraint. Similarly if SrcAlign is zero it 1320 /// means there isn't a need to check it against alignment requirement, 1321 /// probably because the source does not need to be loaded. If 1322 /// 'IsZeroVal' is true, that means it's safe to return a 1323 /// non-scalar-integer type, e.g. empty string source, constant, or loaded 1324 /// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is 1325 /// constant so it does not need to be loaded. 1326 /// It returns EVT::Other if the type should be determined using generic 1327 /// target-independent logic. 1328 EVT 1329 X86TargetLowering::getOptimalMemOpType(uint64_t Size, 1330 unsigned DstAlign, unsigned SrcAlign, 1331 bool IsZeroVal, 1332 bool MemcpyStrSrc, 1333 MachineFunction &MF) const { 1334 // FIXME: This turns off use of xmm stores for memset/memcpy on targets like 1335 // linux. This is because the stack realignment code can't handle certain 1336 // cases like PR2962. This should be removed when PR2962 is fixed. 1337 const Function *F = MF.getFunction(); 1338 if (IsZeroVal && 1339 !F->hasFnAttr(Attribute::NoImplicitFloat)) { 1340 if (Size >= 16 && 1341 (Subtarget->isUnalignedMemAccessFast() || 1342 ((DstAlign == 0 || DstAlign >= 16) && 1343 (SrcAlign == 0 || SrcAlign >= 16))) && 1344 Subtarget->getStackAlignment() >= 16) { 1345 if (Subtarget->getStackAlignment() >= 32) { 1346 if (Subtarget->hasAVX2()) 1347 return MVT::v8i32; 1348 if (Subtarget->hasAVX()) 1349 return MVT::v8f32; 1350 } 1351 if (Subtarget->hasSSE2()) 1352 return MVT::v4i32; 1353 if (Subtarget->hasSSE1()) 1354 return MVT::v4f32; 1355 } else if (!MemcpyStrSrc && Size >= 8 && 1356 !Subtarget->is64Bit() && 1357 Subtarget->getStackAlignment() >= 8 && 1358 Subtarget->hasSSE2()) { 1359 // Do not use f64 to lower memcpy if source is string constant. It's 1360 // better to use i32 to avoid the loads. 
1361 return MVT::f64; 1362 } 1363 } 1364 if (Subtarget->is64Bit() && Size >= 8) 1365 return MVT::i64; 1366 return MVT::i32; 1367 } 1368 1369 /// getJumpTableEncoding - Return the entry encoding for a jump table in the 1370 /// current function. The returned value is a member of the 1371 /// MachineJumpTableInfo::JTEntryKind enum. 1372 unsigned X86TargetLowering::getJumpTableEncoding() const { 1373 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF 1374 // symbol. 1375 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1376 Subtarget->isPICStyleGOT()) 1377 return MachineJumpTableInfo::EK_Custom32; 1378 1379 // Otherwise, use the normal jump table encoding heuristics. 1380 return TargetLowering::getJumpTableEncoding(); 1381 } 1382 1383 const MCExpr * 1384 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, 1385 const MachineBasicBlock *MBB, 1386 unsigned uid,MCContext &Ctx) const{ 1387 assert(getTargetMachine().getRelocationModel() == Reloc::PIC_ && 1388 Subtarget->isPICStyleGOT()); 1389 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF 1390 // entries. 1391 return MCSymbolRefExpr::Create(MBB->getSymbol(), 1392 MCSymbolRefExpr::VK_GOTOFF, Ctx); 1393 } 1394 1395 /// getPICJumpTableRelocaBase - Returns relocation base for the given PIC 1396 /// jumptable. 1397 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table, 1398 SelectionDAG &DAG) const { 1399 if (!Subtarget->is64Bit()) 1400 // This doesn't have DebugLoc associated with it, but is not really the 1401 // same as a Register. 1402 return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy()); 1403 return Table; 1404 } 1405 1406 /// getPICJumpTableRelocBaseExpr - This returns the relocation base for the 1407 /// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an 1408 /// MCExpr. 1409 const MCExpr *X86TargetLowering:: 1410 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, 1411 MCContext &Ctx) const { 1412 // X86-64 uses RIP relative addressing based on the jump table label. 1413 if (Subtarget->isPICStyleRIPRel()) 1414 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); 1415 1416 // Otherwise, the reference is relative to the PIC base. 1417 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx); 1418 } 1419 1420 // FIXME: Why this routine is here? Move to RegInfo! 1421 std::pair<const TargetRegisterClass*, uint8_t> 1422 X86TargetLowering::findRepresentativeClass(EVT VT) const{ 1423 const TargetRegisterClass *RRC = 0; 1424 uint8_t Cost = 1; 1425 switch (VT.getSimpleVT().SimpleTy) { 1426 default: 1427 return TargetLowering::findRepresentativeClass(VT); 1428 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64: 1429 RRC = Subtarget->is64Bit() ? 
1430 (const TargetRegisterClass*)&X86::GR64RegClass : 1431 (const TargetRegisterClass*)&X86::GR32RegClass; 1432 break; 1433 case MVT::x86mmx: 1434 RRC = &X86::VR64RegClass; 1435 break; 1436 case MVT::f32: case MVT::f64: 1437 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: 1438 case MVT::v4f32: case MVT::v2f64: 1439 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32: 1440 case MVT::v4f64: 1441 RRC = &X86::VR128RegClass; 1442 break; 1443 } 1444 return std::make_pair(RRC, Cost); 1445 } 1446 1447 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace, 1448 unsigned &Offset) const { 1449 if (!Subtarget->isTargetLinux()) 1450 return false; 1451 1452 if (Subtarget->is64Bit()) { 1453 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs: 1454 Offset = 0x28; 1455 if (getTargetMachine().getCodeModel() == CodeModel::Kernel) 1456 AddressSpace = 256; 1457 else 1458 AddressSpace = 257; 1459 } else { 1460 // %gs:0x14 on i386 1461 Offset = 0x14; 1462 AddressSpace = 256; 1463 } 1464 return true; 1465 } 1466 1467 1468 //===----------------------------------------------------------------------===// 1469 // Return Value Calling Convention Implementation 1470 //===----------------------------------------------------------------------===// 1471 1472 #include "X86GenCallingConv.inc" 1473 1474 bool 1475 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, 1476 MachineFunction &MF, bool isVarArg, 1477 const SmallVectorImpl<ISD::OutputArg> &Outs, 1478 LLVMContext &Context) const { 1479 SmallVector<CCValAssign, 16> RVLocs; 1480 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1481 RVLocs, Context); 1482 return CCInfo.CheckReturn(Outs, RetCC_X86); 1483 } 1484 1485 SDValue 1486 X86TargetLowering::LowerReturn(SDValue Chain, 1487 CallingConv::ID CallConv, bool isVarArg, 1488 const SmallVectorImpl<ISD::OutputArg> &Outs, 1489 const SmallVectorImpl<SDValue> &OutVals, 1490 DebugLoc dl, SelectionDAG &DAG) const { 1491 MachineFunction &MF = DAG.getMachineFunction(); 1492 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1493 1494 SmallVector<CCValAssign, 16> RVLocs; 1495 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1496 RVLocs, *DAG.getContext()); 1497 CCInfo.AnalyzeReturn(Outs, RetCC_X86); 1498 1499 // Add the regs to the liveout set for the function. 1500 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 1501 for (unsigned i = 0; i != RVLocs.size(); ++i) 1502 if (RVLocs[i].isRegLoc() && !MRI.isLiveOut(RVLocs[i].getLocReg())) 1503 MRI.addLiveOut(RVLocs[i].getLocReg()); 1504 1505 SDValue Flag; 1506 1507 SmallVector<SDValue, 6> RetOps; 1508 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 1509 // Operand #1 = Bytes To Pop 1510 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), 1511 MVT::i16)); 1512 1513 // Copy the result values into the output registers. 
1514 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1515 CCValAssign &VA = RVLocs[i]; 1516 assert(VA.isRegLoc() && "Can only return in registers!"); 1517 SDValue ValToCopy = OutVals[i]; 1518 EVT ValVT = ValToCopy.getValueType(); 1519 1520 // Promote values to the appropriate types 1521 if (VA.getLocInfo() == CCValAssign::SExt) 1522 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy); 1523 else if (VA.getLocInfo() == CCValAssign::ZExt) 1524 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy); 1525 else if (VA.getLocInfo() == CCValAssign::AExt) 1526 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy); 1527 else if (VA.getLocInfo() == CCValAssign::BCvt) 1528 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy); 1529 1530 // If this is x86-64, and we disabled SSE, we can't return FP values, 1531 // or SSE or MMX vectors. 1532 if ((ValVT == MVT::f32 || ValVT == MVT::f64 || 1533 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) && 1534 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) { 1535 report_fatal_error("SSE register return with SSE disabled"); 1536 } 1537 // Likewise we can't return F64 values with SSE1 only. gcc does so, but 1538 // llvm-gcc has never done it right and no one has noticed, so this 1539 // should be OK for now. 1540 if (ValVT == MVT::f64 && 1541 (Subtarget->is64Bit() && !Subtarget->hasSSE2())) 1542 report_fatal_error("SSE2 register return with SSE2 disabled"); 1543 1544 // Returns in ST0/ST1 are handled specially: these are pushed as operands to 1545 // the RET instruction and handled by the FP Stackifier. 1546 if (VA.getLocReg() == X86::ST0 || 1547 VA.getLocReg() == X86::ST1) { 1548 // If this is a copy from an xmm register to ST(0), use an FPExtend to 1549 // change the value to the FP stack register class. 1550 if (isScalarFPTypeInSSEReg(VA.getValVT())) 1551 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy); 1552 RetOps.push_back(ValToCopy); 1553 // Don't emit a copytoreg. 1554 continue; 1555 } 1556 1557 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64 1558 // which is returned in RAX / RDX. 1559 if (Subtarget->is64Bit()) { 1560 if (ValVT == MVT::x86mmx) { 1561 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) { 1562 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy); 1563 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, 1564 ValToCopy); 1565 // If we don't have SSE2 available, convert to v4f32 so the generated 1566 // register is legal. 1567 if (!Subtarget->hasSSE2()) 1568 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy); 1569 } 1570 } 1571 } 1572 1573 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag); 1574 Flag = Chain.getValue(1); 1575 } 1576 1577 // The x86-64 ABI for returning structs by value requires that we copy 1578 // the sret argument into %rax for the return. We saved the argument into 1579 // a virtual register in the entry block, so now we copy the value out 1580 // and into %rax. 
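  // Illustrative example (editor's note): for IR such as
  //   define void @f(%struct.S* sret %out)
  // the x86-64 ABI requires the sret pointer to also be returned in %rax,
  // so the value stashed by LowerFormalArguments is copied back out here.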
1581 if (Subtarget->is64Bit() && 1582 DAG.getMachineFunction().getFunction()->hasStructRetAttr()) { 1583 MachineFunction &MF = DAG.getMachineFunction(); 1584 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1585 unsigned Reg = FuncInfo->getSRetReturnReg(); 1586 assert(Reg && 1587 "SRetReturnReg should have been set in LowerFormalArguments()."); 1588 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy()); 1589 1590 Chain = DAG.getCopyToReg(Chain, dl, X86::RAX, Val, Flag); 1591 Flag = Chain.getValue(1); 1592 1593 // RAX now acts like a return value. 1594 MRI.addLiveOut(X86::RAX); 1595 } 1596 1597 RetOps[0] = Chain; // Update chain. 1598 1599 // Add the flag if we have it. 1600 if (Flag.getNode()) 1601 RetOps.push_back(Flag); 1602 1603 return DAG.getNode(X86ISD::RET_FLAG, dl, 1604 MVT::Other, &RetOps[0], RetOps.size()); 1605 } 1606 1607 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { 1608 if (N->getNumValues() != 1) 1609 return false; 1610 if (!N->hasNUsesOfValue(1, 0)) 1611 return false; 1612 1613 SDValue TCChain = Chain; 1614 SDNode *Copy = *N->use_begin(); 1615 if (Copy->getOpcode() == ISD::CopyToReg) { 1616 // If the copy has a glue operand, we conservatively assume it isn't safe to 1617 // perform a tail call. 1618 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 1619 return false; 1620 TCChain = Copy->getOperand(0); 1621 } else if (Copy->getOpcode() != ISD::FP_EXTEND) 1622 return false; 1623 1624 bool HasRet = false; 1625 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 1626 UI != UE; ++UI) { 1627 if (UI->getOpcode() != X86ISD::RET_FLAG) 1628 return false; 1629 HasRet = true; 1630 } 1631 1632 if (!HasRet) 1633 return false; 1634 1635 Chain = TCChain; 1636 return true; 1637 } 1638 1639 EVT 1640 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT, 1641 ISD::NodeType ExtendKind) const { 1642 MVT ReturnMVT; 1643 // TODO: Is this also valid on 32-bit? 1644 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND) 1645 ReturnMVT = MVT::i8; 1646 else 1647 ReturnMVT = MVT::i32; 1648 1649 EVT MinVT = getRegisterType(Context, ReturnMVT); 1650 return VT.bitsLT(MinVT) ? MinVT : VT; 1651 } 1652 1653 /// LowerCallResult - Lower the result values of a call into the 1654 /// appropriate copies out of appropriate physical registers. 1655 /// 1656 SDValue 1657 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1658 CallingConv::ID CallConv, bool isVarArg, 1659 const SmallVectorImpl<ISD::InputArg> &Ins, 1660 DebugLoc dl, SelectionDAG &DAG, 1661 SmallVectorImpl<SDValue> &InVals) const { 1662 1663 // Assign locations to each value returned by this call. 1664 SmallVector<CCValAssign, 16> RVLocs; 1665 bool Is64Bit = Subtarget->is64Bit(); 1666 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1667 getTargetMachine(), RVLocs, *DAG.getContext()); 1668 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 1669 1670 // Copy all of the result registers out of their specified physreg. 
1671 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1672 CCValAssign &VA = RVLocs[i]; 1673 EVT CopyVT = VA.getValVT(); 1674 1675 // If this is x86-64, and we disabled SSE, we can't return FP values 1676 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) && 1677 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) { 1678 report_fatal_error("SSE register return with SSE disabled"); 1679 } 1680 1681 SDValue Val; 1682 1683 // If this is a call to a function that returns an fp value on the floating 1684 // point stack, we must guarantee the value is popped from the stack, so 1685 // a CopyFromReg is not good enough - the copy instruction may be eliminated 1686 // if the return value is not used. We use the FpPOP_RETVAL instruction 1687 // instead. 1688 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) { 1689 // If we prefer to use the value in xmm registers, copy it out as f80 and 1690 // use a truncate to move it from fp stack reg to xmm reg. 1691 if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80; 1692 SDValue Ops[] = { Chain, InFlag }; 1693 Chain = SDValue(DAG.getMachineNode(X86::FpPOP_RETVAL, dl, CopyVT, 1694 MVT::Other, MVT::Glue, Ops, 2), 1); 1695 Val = Chain.getValue(0); 1696 1697 // Round the f80 to the right size, which also moves it to the appropriate 1698 // xmm register. 1699 if (CopyVT != VA.getValVT()) 1700 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val, 1701 // This truncation won't change the value. 1702 DAG.getIntPtrConstant(1)); 1703 } else { 1704 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), 1705 CopyVT, InFlag).getValue(1); 1706 Val = Chain.getValue(0); 1707 } 1708 InFlag = Chain.getValue(2); 1709 InVals.push_back(Val); 1710 } 1711 1712 return Chain; 1713 } 1714 1715 1716 //===----------------------------------------------------------------------===// 1717 // C & StdCall & Fast Calling Convention implementation 1718 //===----------------------------------------------------------------------===// 1719 // StdCall calling convention seems to be standard for many Windows' API 1720 // routines and around. It differs from C calling convention just a little: 1721 // callee should clean up the stack, not caller. Symbols should be also 1722 // decorated in some fancy way :) It doesn't support any vector arguments. 1723 // For info on fast calling convention see Fast Calling Convention (tail call) 1724 // implementation LowerX86_32FastCCCallTo. 1725 1726 /// CallIsStructReturn - Determines whether a call uses struct return 1727 /// semantics. 1728 enum StructReturnType { 1729 NotStructReturn, 1730 RegStructReturn, 1731 StackStructReturn 1732 }; 1733 static StructReturnType 1734 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) { 1735 if (Outs.empty()) 1736 return NotStructReturn; 1737 1738 const ISD::ArgFlagsTy &Flags = Outs[0].Flags; 1739 if (!Flags.isSRet()) 1740 return NotStructReturn; 1741 if (Flags.isInReg()) 1742 return RegStructReturn; 1743 return StackStructReturn; 1744 } 1745 1746 /// ArgsAreStructReturn - Determines whether a function uses struct 1747 /// return semantics. 
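/// Editor's note, for illustration only: a signature such as
///   define void @f(%struct.S* sret %agg.result, i32 %x)
/// is classified as StackStructReturn below, while marking the sret pointer
/// 'inreg' (as some 32-bit conventions do) yields RegStructReturn.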
1748 static StructReturnType 1749 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) { 1750 if (Ins.empty()) 1751 return NotStructReturn; 1752 1753 const ISD::ArgFlagsTy &Flags = Ins[0].Flags; 1754 if (!Flags.isSRet()) 1755 return NotStructReturn; 1756 if (Flags.isInReg()) 1757 return RegStructReturn; 1758 return StackStructReturn; 1759 } 1760 1761 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 1762 /// by "Src" to address "Dst" with size and alignment information specified by 1763 /// the specific parameter attribute. The copy will be passed as a byval 1764 /// function parameter. 1765 static SDValue 1766 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 1767 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 1768 DebugLoc dl) { 1769 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); 1770 1771 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 1772 /*isVolatile*/false, /*AlwaysInline=*/true, 1773 MachinePointerInfo(), MachinePointerInfo()); 1774 } 1775 1776 /// IsTailCallConvention - Return true if the calling convention is one that 1777 /// supports tail call optimization. 1778 static bool IsTailCallConvention(CallingConv::ID CC) { 1779 return (CC == CallingConv::Fast || CC == CallingConv::GHC); 1780 } 1781 1782 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 1783 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls) 1784 return false; 1785 1786 CallSite CS(CI); 1787 CallingConv::ID CalleeCC = CS.getCallingConv(); 1788 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C) 1789 return false; 1790 1791 return true; 1792 } 1793 1794 /// FuncIsMadeTailCallSafe - Return true if the function is being made into 1795 /// a tailcall target by changing its ABI. 1796 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC, 1797 bool GuaranteedTailCallOpt) { 1798 return GuaranteedTailCallOpt && IsTailCallConvention(CC); 1799 } 1800 1801 SDValue 1802 X86TargetLowering::LowerMemArgument(SDValue Chain, 1803 CallingConv::ID CallConv, 1804 const SmallVectorImpl<ISD::InputArg> &Ins, 1805 DebugLoc dl, SelectionDAG &DAG, 1806 const CCValAssign &VA, 1807 MachineFrameInfo *MFI, 1808 unsigned i) const { 1809 // Create the nodes corresponding to a load from this parameter slot. 1810 ISD::ArgFlagsTy Flags = Ins[i].Flags; 1811 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv, 1812 getTargetMachine().Options.GuaranteedTailCallOpt); 1813 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal(); 1814 EVT ValVT; 1815 1816 // If value is passed by pointer we have address passed instead of the value 1817 // itself. 1818 if (VA.getLocInfo() == CCValAssign::Indirect) 1819 ValVT = VA.getLocVT(); 1820 else 1821 ValVT = VA.getValVT(); 1822 1823 // FIXME: For now, all byval parameter objects are marked mutable. This can be 1824 // changed with more analysis. 1825 // In case of tail call optimization mark all arguments mutable. Since they 1826 // could be overwritten by lowering of arguments in case of a tail call. 1827 if (Flags.isByVal()) { 1828 unsigned Bytes = Flags.getByValSize(); 1829 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 
1830 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable); 1831 return DAG.getFrameIndex(FI, getPointerTy()); 1832 } else { 1833 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8, 1834 VA.getLocMemOffset(), isImmutable); 1835 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 1836 return DAG.getLoad(ValVT, dl, Chain, FIN, 1837 MachinePointerInfo::getFixedStack(FI), 1838 false, false, false, 0); 1839 } 1840 } 1841 1842 SDValue 1843 X86TargetLowering::LowerFormalArguments(SDValue Chain, 1844 CallingConv::ID CallConv, 1845 bool isVarArg, 1846 const SmallVectorImpl<ISD::InputArg> &Ins, 1847 DebugLoc dl, 1848 SelectionDAG &DAG, 1849 SmallVectorImpl<SDValue> &InVals) 1850 const { 1851 MachineFunction &MF = DAG.getMachineFunction(); 1852 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1853 1854 const Function* Fn = MF.getFunction(); 1855 if (Fn->hasExternalLinkage() && 1856 Subtarget->isTargetCygMing() && 1857 Fn->getName() == "main") 1858 FuncInfo->setForceFramePointer(true); 1859 1860 MachineFrameInfo *MFI = MF.getFrameInfo(); 1861 bool Is64Bit = Subtarget->is64Bit(); 1862 bool IsWindows = Subtarget->isTargetWindows(); 1863 bool IsWin64 = Subtarget->isTargetWin64(); 1864 1865 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 1866 "Var args not supported with calling convention fastcc or ghc"); 1867 1868 // Assign locations to all of the incoming arguments. 1869 SmallVector<CCValAssign, 16> ArgLocs; 1870 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 1871 ArgLocs, *DAG.getContext()); 1872 1873 // Allocate shadow area for Win64 1874 if (IsWin64) { 1875 CCInfo.AllocateStack(32, 8); 1876 } 1877 1878 CCInfo.AnalyzeFormalArguments(Ins, CC_X86); 1879 1880 unsigned LastVal = ~0U; 1881 SDValue ArgValue; 1882 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1883 CCValAssign &VA = ArgLocs[i]; 1884 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later 1885 // places. 1886 assert(VA.getValNo() != LastVal && 1887 "Don't support value assigned to multiple locs yet"); 1888 (void)LastVal; 1889 LastVal = VA.getValNo(); 1890 1891 if (VA.isRegLoc()) { 1892 EVT RegVT = VA.getLocVT(); 1893 const TargetRegisterClass *RC; 1894 if (RegVT == MVT::i32) 1895 RC = &X86::GR32RegClass; 1896 else if (Is64Bit && RegVT == MVT::i64) 1897 RC = &X86::GR64RegClass; 1898 else if (RegVT == MVT::f32) 1899 RC = &X86::FR32RegClass; 1900 else if (RegVT == MVT::f64) 1901 RC = &X86::FR64RegClass; 1902 else if (RegVT.is256BitVector()) 1903 RC = &X86::VR256RegClass; 1904 else if (RegVT.is128BitVector()) 1905 RC = &X86::VR128RegClass; 1906 else if (RegVT == MVT::x86mmx) 1907 RC = &X86::VR64RegClass; 1908 else 1909 llvm_unreachable("Unknown argument type!"); 1910 1911 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 1912 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 1913 1914 // If this is an 8 or 16-bit value, it is really passed promoted to 32 1915 // bits. Insert an assert[sz]ext to capture this, then truncate to the 1916 // right size. 
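      // Editor's note (illustrative): an i8 argument that arrives
      // zero-extended in a 32-bit register is modelled roughly as
      //   (truncate (AssertZext reg, i8))
      // so later combines know the upper 24 bits are already zero.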
1917 if (VA.getLocInfo() == CCValAssign::SExt) 1918 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 1919 DAG.getValueType(VA.getValVT())); 1920 else if (VA.getLocInfo() == CCValAssign::ZExt) 1921 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 1922 DAG.getValueType(VA.getValVT())); 1923 else if (VA.getLocInfo() == CCValAssign::BCvt) 1924 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 1925 1926 if (VA.isExtInLoc()) { 1927 // Handle MMX values passed in XMM regs. 1928 if (RegVT.isVector()) { 1929 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), 1930 ArgValue); 1931 } else 1932 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 1933 } 1934 } else { 1935 assert(VA.isMemLoc()); 1936 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i); 1937 } 1938 1939 // If value is passed via pointer - do a load. 1940 if (VA.getLocInfo() == CCValAssign::Indirect) 1941 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, 1942 MachinePointerInfo(), false, false, false, 0); 1943 1944 InVals.push_back(ArgValue); 1945 } 1946 1947 // The x86-64 ABI for returning structs by value requires that we copy 1948 // the sret argument into %rax for the return. Save the argument into 1949 // a virtual register so that we can access it from the return points. 1950 if (Is64Bit && MF.getFunction()->hasStructRetAttr()) { 1951 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 1952 unsigned Reg = FuncInfo->getSRetReturnReg(); 1953 if (!Reg) { 1954 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64)); 1955 FuncInfo->setSRetReturnReg(Reg); 1956 } 1957 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); 1958 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); 1959 } 1960 1961 unsigned StackSize = CCInfo.getNextStackOffset(); 1962 // Align stack specially for tail calls. 1963 if (FuncIsMadeTailCallSafe(CallConv, 1964 MF.getTarget().Options.GuaranteedTailCallOpt)) 1965 StackSize = GetAlignedArgumentStackSize(StackSize, DAG); 1966 1967 // If the function takes variable number of arguments, make a frame index for 1968 // the start of the first vararg value... for expansion of llvm.va_start. 1969 if (isVarArg) { 1970 if (Is64Bit || (CallConv != CallingConv::X86_FastCall && 1971 CallConv != CallingConv::X86_ThisCall)) { 1972 FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,true)); 1973 } 1974 if (Is64Bit) { 1975 unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0; 1976 1977 // FIXME: We should really autogenerate these arrays 1978 static const uint16_t GPR64ArgRegsWin64[] = { 1979 X86::RCX, X86::RDX, X86::R8, X86::R9 1980 }; 1981 static const uint16_t GPR64ArgRegs64Bit[] = { 1982 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 1983 }; 1984 static const uint16_t XMMArgRegs64Bit[] = { 1985 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 1986 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 1987 }; 1988 const uint16_t *GPR64ArgRegs; 1989 unsigned NumXMMRegs = 0; 1990 1991 if (IsWin64) { 1992 // The XMM registers which might contain var arg parameters are shadowed 1993 // in their paired GPR. So we only need to save the GPR to their home 1994 // slots. 
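        // Editor's note: because a Win64 varargs caller duplicates
        // floating-point arguments into the paired GPR, saving the four GPR
        // home slots just above the return address (RCX/XMM0, RDX/XMM1,
        // R8/XMM2, R9/XMM3) is enough for va_start here.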
1995 TotalNumIntRegs = 4; 1996 GPR64ArgRegs = GPR64ArgRegsWin64; 1997 } else { 1998 TotalNumIntRegs = 6; TotalNumXMMRegs = 8; 1999 GPR64ArgRegs = GPR64ArgRegs64Bit; 2000 2001 NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs64Bit, 2002 TotalNumXMMRegs); 2003 } 2004 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, 2005 TotalNumIntRegs); 2006 2007 bool NoImplicitFloatOps = Fn->hasFnAttr(Attribute::NoImplicitFloat); 2008 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) && 2009 "SSE register cannot be used when SSE is disabled!"); 2010 assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat && 2011 NoImplicitFloatOps) && 2012 "SSE register cannot be used when SSE is disabled!"); 2013 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps || 2014 !Subtarget->hasSSE1()) 2015 // Kernel mode asks for SSE to be disabled, so don't push them 2016 // on the stack. 2017 TotalNumXMMRegs = 0; 2018 2019 if (IsWin64) { 2020 const TargetFrameLowering &TFI = *getTargetMachine().getFrameLowering(); 2021 // Get to the caller-allocated home save location. Add 8 to account 2022 // for the return address. 2023 int HomeOffset = TFI.getOffsetOfLocalArea() + 8; 2024 FuncInfo->setRegSaveFrameIndex( 2025 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false)); 2026 // Fixup to set vararg frame on shadow area (4 x i64). 2027 if (NumIntRegs < 4) 2028 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex()); 2029 } else { 2030 // For X86-64, if there are vararg parameters that are passed via 2031 // registers, then we must store them to their spots on the stack so 2032 // they may be loaded by deferencing the result of va_next. 2033 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8); 2034 FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16); 2035 FuncInfo->setRegSaveFrameIndex( 2036 MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16, 2037 false)); 2038 } 2039 2040 // Store the integer parameter registers. 2041 SmallVector<SDValue, 8> MemOps; 2042 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), 2043 getPointerTy()); 2044 unsigned Offset = FuncInfo->getVarArgsGPOffset(); 2045 for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) { 2046 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN, 2047 DAG.getIntPtrConstant(Offset)); 2048 unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs], 2049 &X86::GR64RegClass); 2050 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 2051 SDValue Store = 2052 DAG.getStore(Val.getValue(1), dl, Val, FIN, 2053 MachinePointerInfo::getFixedStack( 2054 FuncInfo->getRegSaveFrameIndex(), Offset), 2055 false, false, 0); 2056 MemOps.push_back(Store); 2057 Offset += 8; 2058 } 2059 2060 if (TotalNumXMMRegs != 0 && NumXMMRegs != TotalNumXMMRegs) { 2061 // Now store the XMM (fp + vector) parameter registers. 
2062 SmallVector<SDValue, 11> SaveXMMOps; 2063 SaveXMMOps.push_back(Chain); 2064 2065 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass); 2066 SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8); 2067 SaveXMMOps.push_back(ALVal); 2068 2069 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2070 FuncInfo->getRegSaveFrameIndex())); 2071 SaveXMMOps.push_back(DAG.getIntPtrConstant( 2072 FuncInfo->getVarArgsFPOffset())); 2073 2074 for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) { 2075 unsigned VReg = MF.addLiveIn(XMMArgRegs64Bit[NumXMMRegs], 2076 &X86::VR128RegClass); 2077 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32); 2078 SaveXMMOps.push_back(Val); 2079 } 2080 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl, 2081 MVT::Other, 2082 &SaveXMMOps[0], SaveXMMOps.size())); 2083 } 2084 2085 if (!MemOps.empty()) 2086 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2087 &MemOps[0], MemOps.size()); 2088 } 2089 } 2090 2091 // Some CCs need callee pop. 2092 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, 2093 MF.getTarget().Options.GuaranteedTailCallOpt)) { 2094 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything. 2095 } else { 2096 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing. 2097 // If this is an sret function, the return should pop the hidden pointer. 2098 if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows && 2099 argsAreStructReturn(Ins) == StackStructReturn) 2100 FuncInfo->setBytesToPopOnReturn(4); 2101 } 2102 2103 if (!Is64Bit) { 2104 // RegSaveFrameIndex is X86-64 only. 2105 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA); 2106 if (CallConv == CallingConv::X86_FastCall || 2107 CallConv == CallingConv::X86_ThisCall) 2108 // fastcc functions can't have varargs. 2109 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA); 2110 } 2111 2112 FuncInfo->setArgumentStackSize(StackSize); 2113 2114 return Chain; 2115 } 2116 2117 SDValue 2118 X86TargetLowering::LowerMemOpCallTo(SDValue Chain, 2119 SDValue StackPtr, SDValue Arg, 2120 DebugLoc dl, SelectionDAG &DAG, 2121 const CCValAssign &VA, 2122 ISD::ArgFlagsTy Flags) const { 2123 unsigned LocMemOffset = VA.getLocMemOffset(); 2124 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 2125 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 2126 if (Flags.isByVal()) 2127 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl); 2128 2129 return DAG.getStore(Chain, dl, Arg, PtrOff, 2130 MachinePointerInfo::getStack(LocMemOffset), 2131 false, false, 0); 2132 } 2133 2134 /// EmitTailCallLoadRetAddr - Emit a load of return address if tail call 2135 /// optimization is performed and it is required. 2136 SDValue 2137 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, 2138 SDValue &OutRetAddr, SDValue Chain, 2139 bool IsTailCall, bool Is64Bit, 2140 int FPDiff, DebugLoc dl) const { 2141 // Adjust the Return address stack slot. 2142 EVT VT = getPointerTy(); 2143 OutRetAddr = getReturnAddressFrameIndex(DAG); 2144 2145 // Load the "old" Return address. 2146 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(), 2147 false, false, false, 0); 2148 return SDValue(OutRetAddr.getNode(), 1); 2149 } 2150 2151 /// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call 2152 /// optimization is performed and it is required (FPDiff!=0). 
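/// Editor's note (illustration only): if the caller reserved 8 bytes of
/// outgoing-argument space but the tail-called function needs 24, FPDiff is
/// 8 - 24 = -16, and the saved return address is re-stored into a new fixed
/// stack slot shifted by that delta before the tail jump.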
2153 static SDValue 2154 EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, 2155 SDValue Chain, SDValue RetAddrFrIdx, 2156 bool Is64Bit, int FPDiff, DebugLoc dl) { 2157 // Store the return address to the appropriate stack slot. 2158 if (!FPDiff) return Chain; 2159 // Calculate the new stack slot for the return address. 2160 int SlotSize = Is64Bit ? 8 : 4; 2161 int NewReturnAddrFI = 2162 MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false); 2163 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 2164 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT); 2165 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx, 2166 MachinePointerInfo::getFixedStack(NewReturnAddrFI), 2167 false, false, 0); 2168 return Chain; 2169 } 2170 2171 SDValue 2172 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 2173 SmallVectorImpl<SDValue> &InVals) const { 2174 SelectionDAG &DAG = CLI.DAG; 2175 DebugLoc &dl = CLI.DL; 2176 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; 2177 SmallVector<SDValue, 32> &OutVals = CLI.OutVals; 2178 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; 2179 SDValue Chain = CLI.Chain; 2180 SDValue Callee = CLI.Callee; 2181 CallingConv::ID CallConv = CLI.CallConv; 2182 bool &isTailCall = CLI.IsTailCall; 2183 bool isVarArg = CLI.IsVarArg; 2184 2185 MachineFunction &MF = DAG.getMachineFunction(); 2186 bool Is64Bit = Subtarget->is64Bit(); 2187 bool IsWin64 = Subtarget->isTargetWin64(); 2188 bool IsWindows = Subtarget->isTargetWindows(); 2189 StructReturnType SR = callIsStructReturn(Outs); 2190 bool IsSibcall = false; 2191 2192 if (MF.getTarget().Options.DisableTailCalls) 2193 isTailCall = false; 2194 2195 if (isTailCall) { 2196 // Check if it's really possible to do a tail call. 2197 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, 2198 isVarArg, SR != NotStructReturn, 2199 MF.getFunction()->hasStructRetAttr(), 2200 Outs, OutVals, Ins, DAG); 2201 2202 // Sibcalls are automatically detected tailcalls which do not require 2203 // ABI changes. 2204 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall) 2205 IsSibcall = true; 2206 2207 if (isTailCall) 2208 ++NumTailCalls; 2209 } 2210 2211 assert(!(isVarArg && IsTailCallConvention(CallConv)) && 2212 "Var args not supported with calling convention fastcc or ghc"); 2213 2214 // Analyze operands of the call, assigning locations to each operand. 2215 SmallVector<CCValAssign, 16> ArgLocs; 2216 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), 2217 ArgLocs, *DAG.getContext()); 2218 2219 // Allocate shadow area for Win64 2220 if (IsWin64) { 2221 CCInfo.AllocateStack(32, 8); 2222 } 2223 2224 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2225 2226 // Get a count of how many bytes are to be pushed on the stack. 2227 unsigned NumBytes = CCInfo.getNextStackOffset(); 2228 if (IsSibcall) 2229 // This is a sibcall. The memory operands are available in caller's 2230 // own caller's stack. 2231 NumBytes = 0; 2232 else if (getTargetMachine().Options.GuaranteedTailCallOpt && 2233 IsTailCallConvention(CallConv)) 2234 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); 2235 2236 int FPDiff = 0; 2237 if (isTailCall && !IsSibcall) { 2238 // Lower arguments at fp - stackoffset + fpdiff. 2239 unsigned NumBytesCallerPushed = 2240 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn(); 2241 FPDiff = NumBytesCallerPushed - NumBytes; 2242 2243 // Set the delta of movement of the returnaddr stackslot. 2244 // But only set if delta is greater than previous delta. 
2245 if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta())) 2246 MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff); 2247 } 2248 2249 if (!IsSibcall) 2250 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 2251 2252 SDValue RetAddrFrIdx; 2253 // Load return address for tail calls. 2254 if (isTailCall && FPDiff) 2255 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall, 2256 Is64Bit, FPDiff, dl); 2257 2258 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 2259 SmallVector<SDValue, 8> MemOpChains; 2260 SDValue StackPtr; 2261 2262 // Walk the register/memloc assignments, inserting copies/loads. In the case 2263 // of tail call optimization arguments are handle later. 2264 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2265 CCValAssign &VA = ArgLocs[i]; 2266 EVT RegVT = VA.getLocVT(); 2267 SDValue Arg = OutVals[i]; 2268 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2269 bool isByVal = Flags.isByVal(); 2270 2271 // Promote the value if needed. 2272 switch (VA.getLocInfo()) { 2273 default: llvm_unreachable("Unknown loc info!"); 2274 case CCValAssign::Full: break; 2275 case CCValAssign::SExt: 2276 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg); 2277 break; 2278 case CCValAssign::ZExt: 2279 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg); 2280 break; 2281 case CCValAssign::AExt: 2282 if (RegVT.is128BitVector()) { 2283 // Special case: passing MMX values in XMM registers. 2284 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 2285 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg); 2286 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg); 2287 } else 2288 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg); 2289 break; 2290 case CCValAssign::BCvt: 2291 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg); 2292 break; 2293 case CCValAssign::Indirect: { 2294 // Store the argument. 2295 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT()); 2296 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 2297 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot, 2298 MachinePointerInfo::getFixedStack(FI), 2299 false, false, 0); 2300 Arg = SpillSlot; 2301 break; 2302 } 2303 } 2304 2305 if (VA.isRegLoc()) { 2306 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 2307 if (isVarArg && IsWin64) { 2308 // Win64 ABI requires argument XMM reg to be copied to the corresponding 2309 // shadow reg if callee is a varargs function. 2310 unsigned ShadowReg = 0; 2311 switch (VA.getLocReg()) { 2312 case X86::XMM0: ShadowReg = X86::RCX; break; 2313 case X86::XMM1: ShadowReg = X86::RDX; break; 2314 case X86::XMM2: ShadowReg = X86::R8; break; 2315 case X86::XMM3: ShadowReg = X86::R9; break; 2316 } 2317 if (ShadowReg) 2318 RegsToPass.push_back(std::make_pair(ShadowReg, Arg)); 2319 } 2320 } else if (!IsSibcall && (!isTailCall || isByVal)) { 2321 assert(VA.isMemLoc()); 2322 if (StackPtr.getNode() == 0) 2323 StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy()); 2324 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, 2325 dl, DAG, VA, Flags)); 2326 } 2327 } 2328 2329 if (!MemOpChains.empty()) 2330 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2331 &MemOpChains[0], MemOpChains.size()); 2332 2333 if (Subtarget->isPICStyleGOT()) { 2334 // ELF / PIC requires GOT in the EBX register before function calls via PLT 2335 // GOT pointer. 
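    // Editor's note: on 32-bit ELF the X86ISD::GlobalBaseReg value is
    // typically materialized with the familiar call/pop sequence plus an
    // add of $_GLOBAL_OFFSET_TABLE_, so EBX holds the GOT address that the
    // callee@PLT stub expects.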
2336 if (!isTailCall) { 2337 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX), 2338 DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy()))); 2339 } else { 2340 // If we are tail calling and generating PIC/GOT style code load the 2341 // address of the callee into ECX. The value in ecx is used as target of 2342 // the tail jump. This is done to circumvent the ebx/callee-saved problem 2343 // for tail calls on PIC/GOT architectures. Normally we would just put the 2344 // address of GOT into ebx and then call target@PLT. But for tail calls 2345 // ebx would be restored (since ebx is callee saved) before jumping to the 2346 // target@PLT. 2347 2348 // Note: The actual moving to ECX is done further down. 2349 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); 2350 if (G && !G->getGlobal()->hasHiddenVisibility() && 2351 !G->getGlobal()->hasProtectedVisibility()) 2352 Callee = LowerGlobalAddress(Callee, DAG); 2353 else if (isa<ExternalSymbolSDNode>(Callee)) 2354 Callee = LowerExternalSymbol(Callee, DAG); 2355 } 2356 } 2357 2358 if (Is64Bit && isVarArg && !IsWin64) { 2359 // From AMD64 ABI document: 2360 // For calls that may call functions that use varargs or stdargs 2361 // (prototype-less calls or calls to functions containing ellipsis (...) in 2362 // the declaration) %al is used as hidden argument to specify the number 2363 // of SSE registers used. The contents of %al do not need to match exactly 2364 // the number of registers, but must be an ubound on the number of SSE 2365 // registers used and is in the range 0 - 8 inclusive. 2366 2367 // Count the number of XMM registers allocated. 2368 static const uint16_t XMMArgRegs[] = { 2369 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, 2370 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 2371 }; 2372 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); 2373 assert((Subtarget->hasSSE1() || !NumXMMRegs) 2374 && "SSE registers cannot be used when SSE is disabled"); 2375 2376 RegsToPass.push_back(std::make_pair(unsigned(X86::AL), 2377 DAG.getConstant(NumXMMRegs, MVT::i8))); 2378 } 2379 2380 // For tail calls lower the arguments to the 'real' stack slot. 2381 if (isTailCall) { 2382 // Force all the incoming stack arguments to be loaded from the stack 2383 // before any new outgoing arguments are stored to the stack, because the 2384 // outgoing stack slots may alias the incoming argument stack slots, and 2385 // the alias isn't otherwise explicit. This is slightly more conservative 2386 // than necessary, because it means that each store effectively depends 2387 // on every argument instead of just those arguments it would clobber. 2388 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain); 2389 2390 SmallVector<SDValue, 8> MemOpChains2; 2391 SDValue FIN; 2392 int FI = 0; 2393 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 2394 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2395 CCValAssign &VA = ArgLocs[i]; 2396 if (VA.isRegLoc()) 2397 continue; 2398 assert(VA.isMemLoc()); 2399 SDValue Arg = OutVals[i]; 2400 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2401 // Create frame index. 2402 int32_t Offset = VA.getLocMemOffset()+FPDiff; 2403 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8; 2404 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 2405 FIN = DAG.getFrameIndex(FI, getPointerTy()); 2406 2407 if (Flags.isByVal()) { 2408 // Copy relative to framepointer. 
          SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
          if (StackPtr.getNode() == 0)
            StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr,
                                          getPointerTy());
          Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);

          MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
                                                           ArgChain,
                                                           Flags, DAG, dl));
        } else {
          // Store relative to framepointer.
          MemOpChains2.push_back(
            DAG.getStore(ArgChain, dl, Arg, FIN,
                         MachinePointerInfo::getFixedStack(FI),
                         false, false, 0));
        }
      }
    }

    if (!MemOpChains2.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                          &MemOpChains2[0], MemOpChains2.size());

    // Store the return address to the appropriate stack slot.
    Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit,
                                     FPDiff, dl);
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  if (getTargetMachine().getCodeModel() == CodeModel::Large) {
    assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
    // In the 64-bit large code model, we have to make all calls
    // through a register, since the call instruction's 32-bit
    // pc-relative offset may not be large enough to hold the whole
    // address.
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is) turn it into a TargetGlobalAddress node so that legalize doesn't
    // hack it.

    // We should use an extra load for direct calls to dllimported functions
    // in non-JIT mode.
    const GlobalValue *GV = G->getGlobal();
    if (!GV->hasDLLImportLinkage()) {
      unsigned char OpFlags = 0;
      bool ExtraLoad = false;
      unsigned WrapperKind = ISD::DELETED_NODE;

      // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
      // external symbols must go through the PLT in PIC mode. If the symbol
      // has hidden or protected visibility, or if it is static or local, then
      // we don't need to use the PLT - we can directly call it.
      if (Subtarget->isTargetELF() &&
          getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
          GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
        OpFlags = X86II::MO_PLT;
      } else if (Subtarget->isPICStyleStubAny() &&
                 (GV->isDeclaration() || GV->isWeakForLinker()) &&
                 (!Subtarget->getTargetTriple().isMacOSX() ||
                  Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
        // PC-relative references to external symbols should go through $stub,
        // unless we're building with the Leopard linker or later, which
        // automatically synthesizes these stubs.
        OpFlags = X86II::MO_DARWIN_STUB;
      } else if (Subtarget->isPICStyleRIPRel() &&
                 isa<Function>(GV) &&
                 cast<Function>(GV)->hasFnAttr(Attribute::NonLazyBind)) {
        // If the function is marked as non-lazy, generate an indirect call
        // which loads from the GOT directly. This avoids runtime overhead
        // at the cost of eager binding (and one extra byte of encoding).
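        // Editor's note (illustrative): rather than a direct 'callq foo'
        // through a lazily-bound PLT stub, this path ends up emitting
        // roughly 'callq *foo@GOTPCREL(%rip)', i.e. one load from the GOT
        // plus an indirect call.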
2487 OpFlags = X86II::MO_GOTPCREL; 2488 WrapperKind = X86ISD::WrapperRIP; 2489 ExtraLoad = true; 2490 } 2491 2492 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 2493 G->getOffset(), OpFlags); 2494 2495 // Add a wrapper if needed. 2496 if (WrapperKind != ISD::DELETED_NODE) 2497 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee); 2498 // Add extra indirection if needed. 2499 if (ExtraLoad) 2500 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee, 2501 MachinePointerInfo::getGOT(), 2502 false, false, false, 0); 2503 } 2504 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 2505 unsigned char OpFlags = 0; 2506 2507 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to 2508 // external symbols should go through the PLT. 2509 if (Subtarget->isTargetELF() && 2510 getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2511 OpFlags = X86II::MO_PLT; 2512 } else if (Subtarget->isPICStyleStubAny() && 2513 (!Subtarget->getTargetTriple().isMacOSX() || 2514 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) { 2515 // PC-relative references to external symbols should go through $stub, 2516 // unless we're building with the leopard linker or later, which 2517 // automatically synthesizes these stubs. 2518 OpFlags = X86II::MO_DARWIN_STUB; 2519 } 2520 2521 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(), 2522 OpFlags); 2523 } 2524 2525 // Returns a chain & a flag for retval copy to use. 2526 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2527 SmallVector<SDValue, 8> Ops; 2528 2529 if (!IsSibcall && isTailCall) { 2530 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 2531 DAG.getIntPtrConstant(0, true), InFlag); 2532 InFlag = Chain.getValue(1); 2533 } 2534 2535 Ops.push_back(Chain); 2536 Ops.push_back(Callee); 2537 2538 if (isTailCall) 2539 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32)); 2540 2541 // Add argument registers to the end of the list so that they are known live 2542 // into the call. 2543 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 2544 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 2545 RegsToPass[i].second.getValueType())); 2546 2547 // Add a register mask operand representing the call-preserved registers. 2548 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); 2549 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); 2550 assert(Mask && "Missing call preserved mask for calling convention"); 2551 Ops.push_back(DAG.getRegisterMask(Mask)); 2552 2553 if (InFlag.getNode()) 2554 Ops.push_back(InFlag); 2555 2556 if (isTailCall) { 2557 // We used to do: 2558 //// If this is the first return lowered for this function, add the regs 2559 //// to the liveout set for the function. 2560 // This isn't right, although it's probably harmless on x86; liveouts 2561 // should be computed from returns not tail calls. Consider a void 2562 // function making a tail call to a function returning int. 2563 return DAG.getNode(X86ISD::TC_RETURN, dl, 2564 NodeTys, &Ops[0], Ops.size()); 2565 } 2566 2567 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, &Ops[0], Ops.size()); 2568 InFlag = Chain.getValue(1); 2569 2570 // Create the CALLSEQ_END node. 
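  // Editor's note, illustration only: for a 32-bit stdcall callee taking two
  // i32 arguments, NumBytesForCalleeToPush becomes 8 and the callee returns
  // with 'ret $8'; for the default C convention it stays 0 and the caller
  // readjusts the stack via CALLSEQ_END instead.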
  unsigned NumBytesForCalleeToPush;
  if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
                       getTargetMachine().Options.GuaranteedTailCallOpt))
    NumBytesForCalleeToPush = NumBytes;    // Callee pops everything
  else if (!Is64Bit && !IsTailCallConvention(CallConv) && !IsWindows &&
           SR == StackStructReturn)
    // If this is a call to a struct-return function, the callee
    // pops the hidden struct pointer, so we have to push it back.
    // This is common for Darwin/X86, Linux & Mingw32 targets.
    // For MSVC Win32 targets, the caller pops the hidden struct pointer.
    NumBytesForCalleeToPush = 4;
  else
    NumBytesForCalleeToPush = 0;  // Callee pops nothing.

  // Returns a flag for retval copy to use.
  if (!IsSibcall) {
    Chain = DAG.getCALLSEQ_END(Chain,
                               DAG.getIntPtrConstant(NumBytes, true),
                               DAG.getIntPtrConstant(NumBytesForCalleeToPush,
                                                     true),
                               InFlag);
    InFlag = Chain.getValue(1);
  }

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
}


//===----------------------------------------------------------------------===//
//                Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//

//  Like the StdCall convention the callee cleans up the arguments, except
//  that ECX is reserved for storing the address of the tail-called function.
//  Only 2 registers are free for argument passing (inreg). Tail call
//  optimization is performed provided:
//                * tailcallopt is enabled
//                * caller/callee are fastcc
//  On the X86_64 architecture with GOT-style position-independent code only
//  local (within module) calls are supported at the moment.
//  To keep the stack aligned according to the platform ABI, the function
//  GetAlignedArgumentStackSize ensures that the argument delta is always a
//  multiple of the stack alignment. (Dynamic linkers need this - darwin's
//  dyld for example.)
//  If a tail-called callee has more arguments than the caller, the caller
//  needs to make sure that there is room to move the RETADDR to. This is
//  achieved by reserving an area the size of the argument delta right after
//  the original RETADDR, but before the saved frame pointer or the spilled
//  registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4):
//  stack layout:
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR
//      move area ]
//    (possible EBP)
//    ESI
//    EDI
//    local1 ..

/// GetAlignedArgumentStackSize - Align the stack size so that, once the
/// return-address slot is included, it satisfies the stack alignment
/// requirement, e.g. make it of the form 16n + 12 for a 16-byte alignment
/// requirement.
unsigned
X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
                                               SelectionDAG& DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const TargetMachine &TM = MF.getTarget();
  const TargetFrameLowering &TFI = *TM.getFrameLowering();
  unsigned StackAlignment = TFI.getStackAlignment();
  uint64_t AlignMask = StackAlignment - 1;
  int64_t Offset = StackSize;
  uint64_t SlotSize = TD->getPointerSize();
  if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
    // Number smaller than 12 so just add the difference.
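    // Editor's note, worked example (assuming a 16-byte stack alignment and
    // a 4-byte slot): StackSize = 20 gives (20 & 15) = 4 <= 12, so Offset
    // becomes 20 + (12 - 4) = 28, i.e. the next value of the form 16n + 12.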
2647 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask)); 2648 } else { 2649 // Mask out lower bits, add stackalignment once plus the 12 bytes. 2650 Offset = ((~AlignMask) & Offset) + StackAlignment + 2651 (StackAlignment-SlotSize); 2652 } 2653 return Offset; 2654 } 2655 2656 /// MatchingStackOffset - Return true if the given stack call argument is 2657 /// already available in the same position (relatively) of the caller's 2658 /// incoming argument stack. 2659 static 2660 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, 2661 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, 2662 const X86InstrInfo *TII) { 2663 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8; 2664 int FI = INT_MAX; 2665 if (Arg.getOpcode() == ISD::CopyFromReg) { 2666 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); 2667 if (!TargetRegisterInfo::isVirtualRegister(VR)) 2668 return false; 2669 MachineInstr *Def = MRI->getVRegDef(VR); 2670 if (!Def) 2671 return false; 2672 if (!Flags.isByVal()) { 2673 if (!TII->isLoadFromStackSlot(Def, FI)) 2674 return false; 2675 } else { 2676 unsigned Opcode = Def->getOpcode(); 2677 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r) && 2678 Def->getOperand(1).isFI()) { 2679 FI = Def->getOperand(1).getIndex(); 2680 Bytes = Flags.getByValSize(); 2681 } else 2682 return false; 2683 } 2684 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { 2685 if (Flags.isByVal()) 2686 // ByVal argument is passed in as a pointer but it's now being 2687 // dereferenced. e.g. 2688 // define @foo(%struct.X* %A) { 2689 // tail call @bar(%struct.X* byval %A) 2690 // } 2691 return false; 2692 SDValue Ptr = Ld->getBasePtr(); 2693 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); 2694 if (!FINode) 2695 return false; 2696 FI = FINode->getIndex(); 2697 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) { 2698 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg); 2699 FI = FINode->getIndex(); 2700 Bytes = Flags.getByValSize(); 2701 } else 2702 return false; 2703 2704 assert(FI != INT_MAX); 2705 if (!MFI->isFixedObjectIndex(FI)) 2706 return false; 2707 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI); 2708 } 2709 2710 /// IsEligibleForTailCallOptimization - Check whether the call is eligible 2711 /// for tail call optimization. Targets which want to do tail call 2712 /// optimization should implement this function. 2713 bool 2714 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 2715 CallingConv::ID CalleeCC, 2716 bool isVarArg, 2717 bool isCalleeStructRet, 2718 bool isCallerStructRet, 2719 const SmallVectorImpl<ISD::OutputArg> &Outs, 2720 const SmallVectorImpl<SDValue> &OutVals, 2721 const SmallVectorImpl<ISD::InputArg> &Ins, 2722 SelectionDAG& DAG) const { 2723 if (!IsTailCallConvention(CalleeCC) && 2724 CalleeCC != CallingConv::C) 2725 return false; 2726 2727 // If -tailcallopt is specified, make fastcc functions tail-callable. 2728 const MachineFunction &MF = DAG.getMachineFunction(); 2729 const Function *CallerF = DAG.getMachineFunction().getFunction(); 2730 CallingConv::ID CallerCC = CallerF->getCallingConv(); 2731 bool CCMatch = CallerCC == CalleeCC; 2732 2733 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 2734 if (IsTailCallConvention(CalleeCC) && CCMatch) 2735 return true; 2736 return false; 2737 } 2738 2739 // Look for obvious safe cases to perform tail call optimization that do not 2740 // require ABI changes. This is what gcc calls sibcall. 
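  // Editor's note (illustrative): the canonical sibcall is something like
  //   int f(int x) { return g(x); }
  // where g expects its argument exactly where f received it, so the call
  // can be emitted as a bare 'jmp g' with no stack or ABI adjustment.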
2741 2742 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to 2743 // emit a special epilogue. 2744 if (RegInfo->needsStackRealignment(MF)) 2745 return false; 2746 2747 // Also avoid sibcall optimization if either caller or callee uses struct 2748 // return semantics. 2749 if (isCalleeStructRet || isCallerStructRet) 2750 return false; 2751 2752 // An stdcall caller is expected to clean up its arguments; the callee 2753 // isn't going to do that. 2754 if (!CCMatch && CallerCC==CallingConv::X86_StdCall) 2755 return false; 2756 2757 // Do not sibcall optimize vararg calls unless all arguments are passed via 2758 // registers. 2759 if (isVarArg && !Outs.empty()) { 2760 2761 // Optimizing for varargs on Win64 is unlikely to be safe without 2762 // additional testing. 2763 if (Subtarget->isTargetWin64()) 2764 return false; 2765 2766 SmallVector<CCValAssign, 16> ArgLocs; 2767 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2768 getTargetMachine(), ArgLocs, *DAG.getContext()); 2769 2770 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2771 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) 2772 if (!ArgLocs[i].isRegLoc()) 2773 return false; 2774 } 2775 2776 // If the call result is in ST0 / ST1, it needs to be popped off the x87 2777 // stack. Therefore, if it's not used by the call it is not safe to optimize 2778 // this into a sibcall. 2779 bool Unused = false; 2780 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 2781 if (!Ins[i].Used) { 2782 Unused = true; 2783 break; 2784 } 2785 } 2786 if (Unused) { 2787 SmallVector<CCValAssign, 16> RVLocs; 2788 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), 2789 getTargetMachine(), RVLocs, *DAG.getContext()); 2790 CCInfo.AnalyzeCallResult(Ins, RetCC_X86); 2791 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 2792 CCValAssign &VA = RVLocs[i]; 2793 if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) 2794 return false; 2795 } 2796 } 2797 2798 // If the calling conventions do not match, then we'd better make sure the 2799 // results are returned in the same way as what the caller expects. 2800 if (!CCMatch) { 2801 SmallVector<CCValAssign, 16> RVLocs1; 2802 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 2803 getTargetMachine(), RVLocs1, *DAG.getContext()); 2804 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86); 2805 2806 SmallVector<CCValAssign, 16> RVLocs2; 2807 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 2808 getTargetMachine(), RVLocs2, *DAG.getContext()); 2809 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86); 2810 2811 if (RVLocs1.size() != RVLocs2.size()) 2812 return false; 2813 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 2814 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 2815 return false; 2816 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 2817 return false; 2818 if (RVLocs1[i].isRegLoc()) { 2819 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 2820 return false; 2821 } else { 2822 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 2823 return false; 2824 } 2825 } 2826 } 2827 2828 // If the callee takes no arguments then go on to check the results of the 2829 // call. 2830 if (!Outs.empty()) { 2831 // Check if stack adjustment is needed. For now, do not do this if any 2832 // argument is passed on the stack. 
2833 SmallVector<CCValAssign, 16> ArgLocs; 2834 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 2835 getTargetMachine(), ArgLocs, *DAG.getContext()); 2836 2837 // Allocate shadow area for Win64 2838 if (Subtarget->isTargetWin64()) { 2839 CCInfo.AllocateStack(32, 8); 2840 } 2841 2842 CCInfo.AnalyzeCallOperands(Outs, CC_X86); 2843 if (CCInfo.getNextStackOffset()) { 2844 MachineFunction &MF = DAG.getMachineFunction(); 2845 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) 2846 return false; 2847 2848 // Check if the arguments are already laid out in the right way as 2849 // the caller's fixed stack objects. 2850 MachineFrameInfo *MFI = MF.getFrameInfo(); 2851 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 2852 const X86InstrInfo *TII = 2853 ((const X86TargetMachine&)getTargetMachine()).getInstrInfo(); 2854 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2855 CCValAssign &VA = ArgLocs[i]; 2856 SDValue Arg = OutVals[i]; 2857 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2858 if (VA.getLocInfo() == CCValAssign::Indirect) 2859 return false; 2860 if (!VA.isRegLoc()) { 2861 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 2862 MFI, MRI, TII)) 2863 return false; 2864 } 2865 } 2866 } 2867 2868 // If the tailcall address may be in a register, then make sure it's 2869 // possible to register allocate for it. In 32-bit, the call address can 2870 // only target EAX, EDX, or ECX since the tail call must be scheduled after 2871 // callee-saved registers are restored. These happen to be the same 2872 // registers used to pass 'inreg' arguments so watch out for those. 2873 if (!Subtarget->is64Bit() && 2874 !isa<GlobalAddressSDNode>(Callee) && 2875 !isa<ExternalSymbolSDNode>(Callee)) { 2876 unsigned NumInRegs = 0; 2877 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2878 CCValAssign &VA = ArgLocs[i]; 2879 if (!VA.isRegLoc()) 2880 continue; 2881 unsigned Reg = VA.getLocReg(); 2882 switch (Reg) { 2883 default: break; 2884 case X86::EAX: case X86::EDX: case X86::ECX: 2885 if (++NumInRegs == 3) 2886 return false; 2887 break; 2888 } 2889 } 2890 } 2891 } 2892 2893 return true; 2894 } 2895 2896 FastISel * 2897 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, 2898 const TargetLibraryInfo *libInfo) const { 2899 return X86::createFastISel(funcInfo, libInfo); 2900 } 2901 2902 2903 //===----------------------------------------------------------------------===// 2904 // Other Lowering Hooks 2905 //===----------------------------------------------------------------------===// 2906 2907 static bool MayFoldLoad(SDValue Op) { 2908 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode()); 2909 } 2910 2911 static bool MayFoldIntoStore(SDValue Op) { 2912 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin()); 2913 } 2914 2915 static bool isTargetShuffle(unsigned Opcode) { 2916 switch(Opcode) { 2917 default: return false; 2918 case X86ISD::PSHUFD: 2919 case X86ISD::PSHUFHW: 2920 case X86ISD::PSHUFLW: 2921 case X86ISD::SHUFP: 2922 case X86ISD::PALIGN: 2923 case X86ISD::MOVLHPS: 2924 case X86ISD::MOVLHPD: 2925 case X86ISD::MOVHLPS: 2926 case X86ISD::MOVLPS: 2927 case X86ISD::MOVLPD: 2928 case X86ISD::MOVSHDUP: 2929 case X86ISD::MOVSLDUP: 2930 case X86ISD::MOVDDUP: 2931 case X86ISD::MOVSS: 2932 case X86ISD::MOVSD: 2933 case X86ISD::UNPCKL: 2934 case X86ISD::UNPCKH: 2935 case X86ISD::VPERMILP: 2936 case X86ISD::VPERM2X128: 2937 case X86ISD::VPERMI: 2938 return true; 2939 } 2940 } 2941 2942 static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc 
dl, EVT VT, 2943 SDValue V1, SelectionDAG &DAG) { 2944 switch(Opc) { 2945 default: llvm_unreachable("Unknown x86 shuffle node"); 2946 case X86ISD::MOVSHDUP: 2947 case X86ISD::MOVSLDUP: 2948 case X86ISD::MOVDDUP: 2949 return DAG.getNode(Opc, dl, VT, V1); 2950 } 2951 } 2952 2953 static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2954 SDValue V1, unsigned TargetMask, 2955 SelectionDAG &DAG) { 2956 switch(Opc) { 2957 default: llvm_unreachable("Unknown x86 shuffle node"); 2958 case X86ISD::PSHUFD: 2959 case X86ISD::PSHUFHW: 2960 case X86ISD::PSHUFLW: 2961 case X86ISD::VPERMILP: 2962 case X86ISD::VPERMI: 2963 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8)); 2964 } 2965 } 2966 2967 static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2968 SDValue V1, SDValue V2, unsigned TargetMask, 2969 SelectionDAG &DAG) { 2970 switch(Opc) { 2971 default: llvm_unreachable("Unknown x86 shuffle node"); 2972 case X86ISD::PALIGN: 2973 case X86ISD::SHUFP: 2974 case X86ISD::VPERM2X128: 2975 return DAG.getNode(Opc, dl, VT, V1, V2, 2976 DAG.getConstant(TargetMask, MVT::i8)); 2977 } 2978 } 2979 2980 static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, 2981 SDValue V1, SDValue V2, SelectionDAG &DAG) { 2982 switch(Opc) { 2983 default: llvm_unreachable("Unknown x86 shuffle node"); 2984 case X86ISD::MOVLHPS: 2985 case X86ISD::MOVLHPD: 2986 case X86ISD::MOVHLPS: 2987 case X86ISD::MOVLPS: 2988 case X86ISD::MOVLPD: 2989 case X86ISD::MOVSS: 2990 case X86ISD::MOVSD: 2991 case X86ISD::UNPCKL: 2992 case X86ISD::UNPCKH: 2993 return DAG.getNode(Opc, dl, VT, V1, V2); 2994 } 2995 } 2996 2997 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const { 2998 MachineFunction &MF = DAG.getMachineFunction(); 2999 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); 3000 int ReturnAddrIndex = FuncInfo->getRAIndex(); 3001 3002 if (ReturnAddrIndex == 0) { 3003 // Set up a frame object for the return address. 3004 uint64_t SlotSize = TD->getPointerSize(); 3005 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize, 3006 false); 3007 FuncInfo->setRAIndex(ReturnAddrIndex); 3008 } 3009 3010 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy()); 3011 } 3012 3013 3014 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M, 3015 bool hasSymbolicDisplacement) { 3016 // Offset should fit into 32 bit immediate field. 3017 if (!isInt<32>(Offset)) 3018 return false; 3019 3020 // If we don't have a symbolic displacement - we don't have any extra 3021 // restrictions. 3022 if (!hasSymbolicDisplacement) 3023 return true; 3024 3025 // FIXME: Some tweaks might be needed for medium code model. 3026 if (M != CodeModel::Small && M != CodeModel::Kernel) 3027 return false; 3028 3029 // For small code model we assume that latest object is 16MB before end of 31 3030 // bits boundary. We may also accept pretty large negative constants knowing 3031 // that all objects are in the positive half of address space. 3032 if (M == CodeModel::Small && Offset < 16*1024*1024) 3033 return true; 3034 3035 // For kernel code model we know that all object resist in the negative half 3036 // of 32bits address space. We may not accept negative offsets, since they may 3037 // be just off and we may accept pretty large positive ones. 
3038 if (M == CodeModel::Kernel && Offset > 0) 3039 return true; 3040 3041 return false; 3042 } 3043 3044 /// isCalleePop - Determines whether the callee is required to pop its 3045 /// own arguments. Callee pop is necessary to support tail calls. 3046 bool X86::isCalleePop(CallingConv::ID CallingConv, 3047 bool is64Bit, bool IsVarArg, bool TailCallOpt) { 3048 if (IsVarArg) 3049 return false; 3050 3051 switch (CallingConv) { 3052 default: 3053 return false; 3054 case CallingConv::X86_StdCall: 3055 return !is64Bit; 3056 case CallingConv::X86_FastCall: 3057 return !is64Bit; 3058 case CallingConv::X86_ThisCall: 3059 return !is64Bit; 3060 case CallingConv::Fast: 3061 return TailCallOpt; 3062 case CallingConv::GHC: 3063 return TailCallOpt; 3064 } 3065 } 3066 3067 /// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86 3068 /// specific condition code, returning the condition code and the LHS/RHS of the 3069 /// comparison to make. 3070 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP, 3071 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) { 3072 if (!isFP) { 3073 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 3074 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) { 3075 // X > -1 -> X == 0, jump !sign. 3076 RHS = DAG.getConstant(0, RHS.getValueType()); 3077 return X86::COND_NS; 3078 } 3079 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { 3080 // X < 0 -> X == 0, jump on sign. 3081 return X86::COND_S; 3082 } 3083 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) { 3084 // X < 1 -> X <= 0 3085 RHS = DAG.getConstant(0, RHS.getValueType()); 3086 return X86::COND_LE; 3087 } 3088 } 3089 3090 switch (SetCCOpcode) { 3091 default: llvm_unreachable("Invalid integer condition!"); 3092 case ISD::SETEQ: return X86::COND_E; 3093 case ISD::SETGT: return X86::COND_G; 3094 case ISD::SETGE: return X86::COND_GE; 3095 case ISD::SETLT: return X86::COND_L; 3096 case ISD::SETLE: return X86::COND_LE; 3097 case ISD::SETNE: return X86::COND_NE; 3098 case ISD::SETULT: return X86::COND_B; 3099 case ISD::SETUGT: return X86::COND_A; 3100 case ISD::SETULE: return X86::COND_BE; 3101 case ISD::SETUGE: return X86::COND_AE; 3102 } 3103 } 3104 3105 // First determine if it is required or is profitable to flip the operands. 3106 3107 // If LHS is a foldable load, but RHS is not, flip the condition. 
  if (ISD::isNON_EXTLoad(LHS.getNode()) &&
      !ISD::isNON_EXTLoad(RHS.getNode())) {
    SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
    std::swap(LHS, RHS);
  }

  switch (SetCCOpcode) {
  default: break;
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETUGT:
  case ISD::SETUGE:
    std::swap(LHS, RHS);
    break;
  }

  // On a floating point condition, the flags are set as follows:
  //  ZF  PF  CF   op
  //   0 | 0 | 0 | X > Y
  //   0 | 0 | 1 | X < Y
  //   1 | 0 | 0 | X == Y
  //   1 | 1 | 1 | unordered
  switch (SetCCOpcode) {
  default: llvm_unreachable("Condcode should be pre-legalized away");
  case ISD::SETUEQ:
  case ISD::SETEQ:   return X86::COND_E;
  case ISD::SETOLT:              // flipped
  case ISD::SETOGT:
  case ISD::SETGT:   return X86::COND_A;
  case ISD::SETOLE:              // flipped
  case ISD::SETOGE:
  case ISD::SETGE:   return X86::COND_AE;
  case ISD::SETUGT:              // flipped
  case ISD::SETULT:
  case ISD::SETLT:   return X86::COND_B;
  case ISD::SETUGE:              // flipped
  case ISD::SETULE:
  case ISD::SETLE:   return X86::COND_BE;
  case ISD::SETONE:
  case ISD::SETNE:   return X86::COND_NE;
  case ISD::SETUO:   return X86::COND_P;
  case ISD::SETO:    return X86::COND_NP;
  case ISD::SETOEQ:
  case ISD::SETUNE:  return X86::COND_INVALID;
  }
}

/// hasFPCMov - is there a floating point cmov for the specific X86 condition
/// code. The current x86 ISA includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86::COND_B:
  case X86::COND_BE:
  case X86::COND_E:
  case X86::COND_P:
  case X86::COND_A:
  case X86::COND_AE:
  case X86::COND_NE:
  case X86::COND_NP:
    return true;
  }
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
    if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
      return true;
  }
  return false;
}

/// isUndefOrInRange - Return true if Val is undef or if its value falls within
/// the specified range [Low, Hi).
static bool isUndefOrInRange(int Val, int Low, int Hi) {
  return (Val < 0) || (Val >= Low && Val < Hi);
}

/// isUndefOrEqual - Val is either less than zero (undef) or equal to the
/// specified value.
static bool isUndefOrEqual(int Val, int CmpVal) {
  if (Val < 0 || Val == CmpVal)
    return true;
  return false;
}

/// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
/// at position Pos and ending at Pos+Size (exclusive), is either undef or
/// matches the corresponding value in the sequential range [Low, Low+Size).
static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
                                       unsigned Pos, unsigned Size, int Low) {
  for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
    if (!isUndefOrEqual(Mask[i], Low))
      return false;
  return true;
}

/// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
/// is suitable for input to PSHUFD or PSHUFW.
That is, it doesn't reference 3212 /// the second operand. 3213 static bool isPSHUFDMask(ArrayRef<int> Mask, EVT VT) { 3214 if (VT == MVT::v4f32 || VT == MVT::v4i32 ) 3215 return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4); 3216 if (VT == MVT::v2f64 || VT == MVT::v2i64) 3217 return (Mask[0] < 2 && Mask[1] < 2); 3218 return false; 3219 } 3220 3221 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that 3222 /// is suitable for input to PSHUFHW. 3223 static bool isPSHUFHWMask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) { 3224 if (VT != MVT::v8i16 && (!HasAVX2 || VT != MVT::v16i16)) 3225 return false; 3226 3227 // Lower quadword copied in order or undef. 3228 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0)) 3229 return false; 3230 3231 // Upper quadword shuffled. 3232 for (unsigned i = 4; i != 8; ++i) 3233 if (!isUndefOrInRange(Mask[i], 4, 8)) 3234 return false; 3235 3236 if (VT == MVT::v16i16) { 3237 // Lower quadword copied in order or undef. 3238 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8)) 3239 return false; 3240 3241 // Upper quadword shuffled. 3242 for (unsigned i = 12; i != 16; ++i) 3243 if (!isUndefOrInRange(Mask[i], 12, 16)) 3244 return false; 3245 } 3246 3247 return true; 3248 } 3249 3250 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that 3251 /// is suitable for input to PSHUFLW. 3252 static bool isPSHUFLWMask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) { 3253 if (VT != MVT::v8i16 && (!HasAVX2 || VT != MVT::v16i16)) 3254 return false; 3255 3256 // Upper quadword copied in order. 3257 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4)) 3258 return false; 3259 3260 // Lower quadword shuffled. 3261 for (unsigned i = 0; i != 4; ++i) 3262 if (!isUndefOrInRange(Mask[i], 0, 4)) 3263 return false; 3264 3265 if (VT == MVT::v16i16) { 3266 // Upper quadword copied in order. 3267 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12)) 3268 return false; 3269 3270 // Lower quadword shuffled. 3271 for (unsigned i = 8; i != 12; ++i) 3272 if (!isUndefOrInRange(Mask[i], 8, 12)) 3273 return false; 3274 } 3275 3276 return true; 3277 } 3278 3279 /// isPALIGNRMask - Return true if the node specifies a shuffle of elements that 3280 /// is suitable for input to PALIGNR. 3281 static bool isPALIGNRMask(ArrayRef<int> Mask, EVT VT, 3282 const X86Subtarget *Subtarget) { 3283 if ((VT.getSizeInBits() == 128 && !Subtarget->hasSSSE3()) || 3284 (VT.getSizeInBits() == 256 && !Subtarget->hasAVX2())) 3285 return false; 3286 3287 unsigned NumElts = VT.getVectorNumElements(); 3288 unsigned NumLanes = VT.getSizeInBits()/128; 3289 unsigned NumLaneElts = NumElts/NumLanes; 3290 3291 // Do not handle 64-bit element shuffles with palignr. 
  if (NumLaneElts == 2)
    return false;

  for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
    unsigned i;
    for (i = 0; i != NumLaneElts; ++i) {
      if (Mask[i+l] >= 0)
        break;
    }

    // Lane is all undef, go to next lane
    if (i == NumLaneElts)
      continue;

    int Start = Mask[i+l];

    // Make sure it's in this lane in one of the sources
    if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
        !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
      return false;

    // If not lane 0, then we must match lane 0
    if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
      return false;

    // Correct second source to be contiguous with first source
    if (Start >= (int)NumElts)
      Start -= NumElts - NumLaneElts;

    // Make sure we're shifting in the right direction.
    if (Start <= (int)(i+l))
      return false;

    Start -= i;

    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != NumLaneElts; ++i) {
      int Idx = Mask[i+l];

      // Make sure it's in this lane
      if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
          !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
        return false;

      // If not lane 0, then we must match lane 0
      if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
        return false;

      if (Idx >= (int)NumElts)
        Idx -= NumElts - NumLaneElts;

      if (!isUndefOrEqual(Idx, Start+i))
        return false;

    }
  }

  return true;
}

/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
/// the two vector operands have swapped position.
static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
                                     unsigned NumElems) {
  for (unsigned i = 0; i != NumElems; ++i) {
    int idx = Mask[i];
    if (idx < 0)
      continue;
    else if (idx < (int)NumElems)
      Mask[i] = idx + NumElems;
    else
      Mask[i] = idx - NumElems;
  }
}

/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to 128/256-bit
/// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be
/// the reverse of what x86 shuffles want.
static bool isSHUFPMask(ArrayRef<int> Mask, EVT VT, bool HasAVX,
                        bool Commuted = false) {
  if (!HasAVX && VT.getSizeInBits() == 256)
    return false;

  unsigned NumElems = VT.getVectorNumElements();
  unsigned NumLanes = VT.getSizeInBits()/128;
  unsigned NumLaneElems = NumElems/NumLanes;

  if (NumLaneElems != 2 && NumLaneElems != 4)
    return false;

  // VSHUFPSY divides the resulting vector into 4 chunks.
  // The sources are also split into 4 chunks, and each destination
  // chunk must come from a different source chunk.
  //
  // SRC1 =>   X7    X6    X5    X4    X3    X2    X1    X0
  // SRC2 =>   Y7    Y6    Y5    Y4    Y3    Y2    Y1    Y0
  //
  // DST  =>  Y7..Y4,   Y7..Y4,   X7..X4,   X7..X4,
  //          Y3..Y0,   Y3..Y0,   X3..X0,   X3..X0
  //
  // VSHUFPDY divides the resulting vector into 4 chunks.
  // The sources are also split into 4 chunks, and each destination
  // chunk must come from a different source chunk.
3396 // 3397 // SRC1 => X3 X2 X1 X0 3398 // SRC2 => Y3 Y2 Y1 Y0 3399 // 3400 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0 3401 // 3402 unsigned HalfLaneElems = NumLaneElems/2; 3403 for (unsigned l = 0; l != NumElems; l += NumLaneElems) { 3404 for (unsigned i = 0; i != NumLaneElems; ++i) { 3405 int Idx = Mask[i+l]; 3406 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0); 3407 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems)) 3408 return false; 3409 // For VSHUFPSY, the mask of the second half must be the same as the 3410 // first but with the appropriate offsets. This works in the same way as 3411 // VPERMILPS works with masks. 3412 if (NumElems != 8 || l == 0 || Mask[i] < 0) 3413 continue; 3414 if (!isUndefOrEqual(Idx, Mask[i]+l)) 3415 return false; 3416 } 3417 } 3418 3419 return true; 3420 } 3421 3422 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 3423 /// specifies a shuffle of elements that is suitable for input to MOVHLPS. 3424 static bool isMOVHLPSMask(ArrayRef<int> Mask, EVT VT) { 3425 if (!VT.is128BitVector()) 3426 return false; 3427 3428 unsigned NumElems = VT.getVectorNumElements(); 3429 3430 if (NumElems != 4) 3431 return false; 3432 3433 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3 3434 return isUndefOrEqual(Mask[0], 6) && 3435 isUndefOrEqual(Mask[1], 7) && 3436 isUndefOrEqual(Mask[2], 2) && 3437 isUndefOrEqual(Mask[3], 3); 3438 } 3439 3440 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 3441 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, 3442 /// <2, 3, 2, 3> 3443 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, EVT VT) { 3444 if (!VT.is128BitVector()) 3445 return false; 3446 3447 unsigned NumElems = VT.getVectorNumElements(); 3448 3449 if (NumElems != 4) 3450 return false; 3451 3452 return isUndefOrEqual(Mask[0], 2) && 3453 isUndefOrEqual(Mask[1], 3) && 3454 isUndefOrEqual(Mask[2], 2) && 3455 isUndefOrEqual(Mask[3], 3); 3456 } 3457 3458 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 3459 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}. 3460 static bool isMOVLPMask(ArrayRef<int> Mask, EVT VT) { 3461 if (!VT.is128BitVector()) 3462 return false; 3463 3464 unsigned NumElems = VT.getVectorNumElements(); 3465 3466 if (NumElems != 2 && NumElems != 4) 3467 return false; 3468 3469 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3470 if (!isUndefOrEqual(Mask[i], i + NumElems)) 3471 return false; 3472 3473 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i) 3474 if (!isUndefOrEqual(Mask[i], i)) 3475 return false; 3476 3477 return true; 3478 } 3479 3480 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand 3481 /// specifies a shuffle of elements that is suitable for input to MOVLHPS. 3482 static bool isMOVLHPSMask(ArrayRef<int> Mask, EVT VT) { 3483 if (!VT.is128BitVector()) 3484 return false; 3485 3486 unsigned NumElems = VT.getVectorNumElements(); 3487 3488 if (NumElems != 2 && NumElems != 4) 3489 return false; 3490 3491 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3492 if (!isUndefOrEqual(Mask[i], i)) 3493 return false; 3494 3495 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 3496 if (!isUndefOrEqual(Mask[i + e], i + NumElems)) 3497 return false; 3498 3499 return true; 3500 } 3501 3502 // 3503 // Some special combinations that can be optimized. 
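  // For example, the v8i32/v8f32 mask <0, 8, 2, 10, 4, 12, 6, 14>, which
  // interleaves the even elements of the two sources, is rewritten below as a
  // single-source element shift followed by a blend-style shuffle.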
3504 // 3505 static 3506 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp, 3507 SelectionDAG &DAG) { 3508 EVT VT = SVOp->getValueType(0); 3509 DebugLoc dl = SVOp->getDebugLoc(); 3510 3511 if (VT != MVT::v8i32 && VT != MVT::v8f32) 3512 return SDValue(); 3513 3514 ArrayRef<int> Mask = SVOp->getMask(); 3515 3516 // These are the special masks that may be optimized. 3517 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14}; 3518 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15}; 3519 bool MatchEvenMask = true; 3520 bool MatchOddMask = true; 3521 for (int i=0; i<8; ++i) { 3522 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i])) 3523 MatchEvenMask = false; 3524 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i])) 3525 MatchOddMask = false; 3526 } 3527 3528 if (!MatchEvenMask && !MatchOddMask) 3529 return SDValue(); 3530 3531 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT); 3532 3533 SDValue Op0 = SVOp->getOperand(0); 3534 SDValue Op1 = SVOp->getOperand(1); 3535 3536 if (MatchEvenMask) { 3537 // Shift the second operand right to 32 bits. 3538 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 }; 3539 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask); 3540 } else { 3541 // Shift the first operand left to 32 bits. 3542 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 }; 3543 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask); 3544 } 3545 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15}; 3546 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask); 3547 } 3548 3549 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 3550 /// specifies a shuffle of elements that is suitable for input to UNPCKL. 3551 static bool isUNPCKLMask(ArrayRef<int> Mask, EVT VT, 3552 bool HasAVX2, bool V2IsSplat = false) { 3553 unsigned NumElts = VT.getVectorNumElements(); 3554 3555 assert((VT.is128BitVector() || VT.is256BitVector()) && 3556 "Unsupported vector type for unpckh"); 3557 3558 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3559 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3560 return false; 3561 3562 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3563 // independently on 128-bit lanes. 3564 unsigned NumLanes = VT.getSizeInBits()/128; 3565 unsigned NumLaneElts = NumElts/NumLanes; 3566 3567 for (unsigned l = 0; l != NumLanes; ++l) { 3568 for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; 3569 i != (l+1)*NumLaneElts; 3570 i += 2, ++j) { 3571 int BitI = Mask[i]; 3572 int BitI1 = Mask[i+1]; 3573 if (!isUndefOrEqual(BitI, j)) 3574 return false; 3575 if (V2IsSplat) { 3576 if (!isUndefOrEqual(BitI1, NumElts)) 3577 return false; 3578 } else { 3579 if (!isUndefOrEqual(BitI1, j + NumElts)) 3580 return false; 3581 } 3582 } 3583 } 3584 3585 return true; 3586 } 3587 3588 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 3589 /// specifies a shuffle of elements that is suitable for input to UNPCKH. 3590 static bool isUNPCKHMask(ArrayRef<int> Mask, EVT VT, 3591 bool HasAVX2, bool V2IsSplat = false) { 3592 unsigned NumElts = VT.getVectorNumElements(); 3593 3594 assert((VT.is128BitVector() || VT.is256BitVector()) && 3595 "Unsupported vector type for unpckh"); 3596 3597 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3598 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3599 return false; 3600 3601 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3602 // independently on 128-bit lanes. 
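  // e.g. for v8i32 the expected UNPCKH mask is <2, 10, 3, 11, 6, 14, 7, 15>.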
3603 unsigned NumLanes = VT.getSizeInBits()/128; 3604 unsigned NumLaneElts = NumElts/NumLanes; 3605 3606 for (unsigned l = 0; l != NumLanes; ++l) { 3607 for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; 3608 i != (l+1)*NumLaneElts; i += 2, ++j) { 3609 int BitI = Mask[i]; 3610 int BitI1 = Mask[i+1]; 3611 if (!isUndefOrEqual(BitI, j)) 3612 return false; 3613 if (V2IsSplat) { 3614 if (isUndefOrEqual(BitI1, NumElts)) 3615 return false; 3616 } else { 3617 if (!isUndefOrEqual(BitI1, j+NumElts)) 3618 return false; 3619 } 3620 } 3621 } 3622 return true; 3623 } 3624 3625 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 3626 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 3627 /// <0, 0, 1, 1> 3628 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, EVT VT, 3629 bool HasAVX2) { 3630 unsigned NumElts = VT.getVectorNumElements(); 3631 3632 assert((VT.is128BitVector() || VT.is256BitVector()) && 3633 "Unsupported vector type for unpckh"); 3634 3635 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3636 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3637 return false; 3638 3639 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern 3640 // FIXME: Need a better way to get rid of this, there's no latency difference 3641 // between UNPCKLPD and MOVDDUP, the later should always be checked first and 3642 // the former later. We should also remove the "_undef" special mask. 3643 if (NumElts == 4 && VT.getSizeInBits() == 256) 3644 return false; 3645 3646 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3647 // independently on 128-bit lanes. 3648 unsigned NumLanes = VT.getSizeInBits()/128; 3649 unsigned NumLaneElts = NumElts/NumLanes; 3650 3651 for (unsigned l = 0; l != NumLanes; ++l) { 3652 for (unsigned i = l*NumLaneElts, j = l*NumLaneElts; 3653 i != (l+1)*NumLaneElts; 3654 i += 2, ++j) { 3655 int BitI = Mask[i]; 3656 int BitI1 = Mask[i+1]; 3657 3658 if (!isUndefOrEqual(BitI, j)) 3659 return false; 3660 if (!isUndefOrEqual(BitI1, j)) 3661 return false; 3662 } 3663 } 3664 3665 return true; 3666 } 3667 3668 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 3669 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, 3670 /// <2, 2, 3, 3> 3671 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, EVT VT, bool HasAVX2) { 3672 unsigned NumElts = VT.getVectorNumElements(); 3673 3674 assert((VT.is128BitVector() || VT.is256BitVector()) && 3675 "Unsupported vector type for unpckh"); 3676 3677 if (VT.getSizeInBits() == 256 && NumElts != 4 && NumElts != 8 && 3678 (!HasAVX2 || (NumElts != 16 && NumElts != 32))) 3679 return false; 3680 3681 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate 3682 // independently on 128-bit lanes. 3683 unsigned NumLanes = VT.getSizeInBits()/128; 3684 unsigned NumLaneElts = NumElts/NumLanes; 3685 3686 for (unsigned l = 0; l != NumLanes; ++l) { 3687 for (unsigned i = l*NumLaneElts, j = (l*NumLaneElts)+NumLaneElts/2; 3688 i != (l+1)*NumLaneElts; i += 2, ++j) { 3689 int BitI = Mask[i]; 3690 int BitI1 = Mask[i+1]; 3691 if (!isUndefOrEqual(BitI, j)) 3692 return false; 3693 if (!isUndefOrEqual(BitI1, j)) 3694 return false; 3695 } 3696 } 3697 return true; 3698 } 3699 3700 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 3701 /// specifies a shuffle of elements that is suitable for input to MOVSS, 3702 /// MOVSD, and MOVD, i.e. setting the lowest element. 
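/// For example, for v4i32 this matches the mask <4, 1, 2, 3>, where element 0
/// comes from V2 and the remaining elements come from V1 in order.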
static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
  if (VT.getVectorElementType().getSizeInBits() < 32)
    return false;
  if (!VT.is128BitVector())
    return false;

  unsigned NumElts = VT.getVectorNumElements();

  if (!isUndefOrEqual(Mask[0], NumElts))
    return false;

  for (unsigned i = 1; i != NumElts; ++i)
    if (!isUndefOrEqual(Mask[i], i))
      return false;

  return true;
}

/// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
/// as permutations between 128-bit chunks or halves. As an example, the
/// shuffle below:
///   vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
/// takes its first half from the second half of V1 and its second half from
/// the second half of V2.
static bool isVPERM2X128Mask(ArrayRef<int> Mask, EVT VT, bool HasAVX) {
  if (!HasAVX || !VT.is256BitVector())
    return false;

  // The shuffle result is divided into half A and half B. In total the two
  // sources have 4 halves, namely: C, D, E, F. The final values of A and
  // B must come from C, D, E or F.
  unsigned HalfSize = VT.getVectorNumElements()/2;
  bool MatchA = false, MatchB = false;

  // Check if A comes from one of C, D, E, F.
  for (unsigned Half = 0; Half != 4; ++Half) {
    if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
      MatchA = true;
      break;
    }
  }

  // Check if B comes from one of C, D, E, F.
  for (unsigned Half = 0; Half != 4; ++Half) {
    if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
      MatchB = true;
      break;
    }
  }

  return MatchA && MatchB;
}

/// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
/// the specified VECTOR_SHUFFLE mask with VPERM2F128/VPERM2I128 instructions.
static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
  EVT VT = SVOp->getValueType(0);

  unsigned HalfSize = VT.getVectorNumElements()/2;

  unsigned FstHalf = 0, SndHalf = 0;
  for (unsigned i = 0; i < HalfSize; ++i) {
    if (SVOp->getMaskElt(i) > 0) {
      FstHalf = SVOp->getMaskElt(i)/HalfSize;
      break;
    }
  }
  for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
    if (SVOp->getMaskElt(i) > 0) {
      SndHalf = SVOp->getMaskElt(i)/HalfSize;
      break;
    }
  }

  return (FstHalf | (SndHalf << 4));
}

/// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
/// Note that VPERMIL mask matching differs depending on whether the underlying
/// type is 32- or 64-bit. For VPERMILPS the high half of the mask must use the
/// same element pattern as the low half, but point into the higher half of the
/// source. For VPERMILPD the two lanes can be shuffled independently of each
/// other, with the same restriction that lanes can't be crossed. Also handles
/// PSHUFDY.
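/// e.g. <2, 3, 0, 1, 6, 7, 4, 5> is a valid v8f32 VPERMILPS mask: every index
/// stays within its own 128-bit lane and the high lane repeats the pattern of
/// the low lane.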
3787 static bool isVPERMILPMask(ArrayRef<int> Mask, EVT VT, bool HasAVX) { 3788 if (!HasAVX) 3789 return false; 3790 3791 unsigned NumElts = VT.getVectorNumElements(); 3792 // Only match 256-bit with 32/64-bit types 3793 if (VT.getSizeInBits() != 256 || (NumElts != 4 && NumElts != 8)) 3794 return false; 3795 3796 unsigned NumLanes = VT.getSizeInBits()/128; 3797 unsigned LaneSize = NumElts/NumLanes; 3798 for (unsigned l = 0; l != NumElts; l += LaneSize) { 3799 for (unsigned i = 0; i != LaneSize; ++i) { 3800 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize)) 3801 return false; 3802 if (NumElts != 8 || l == 0) 3803 continue; 3804 // VPERMILPS handling 3805 if (Mask[i] < 0) 3806 continue; 3807 if (!isUndefOrEqual(Mask[i+l], Mask[i]+l)) 3808 return false; 3809 } 3810 } 3811 3812 return true; 3813 } 3814 3815 /// isCommutedMOVLMask - Returns true if the shuffle mask is except the reverse 3816 /// of what x86 movss want. X86 movs requires the lowest element to be lowest 3817 /// element of vector 2 and the other elements to come from vector 1 in order. 3818 static bool isCommutedMOVLMask(ArrayRef<int> Mask, EVT VT, 3819 bool V2IsSplat = false, bool V2IsUndef = false) { 3820 if (!VT.is128BitVector()) 3821 return false; 3822 3823 unsigned NumOps = VT.getVectorNumElements(); 3824 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16) 3825 return false; 3826 3827 if (!isUndefOrEqual(Mask[0], 0)) 3828 return false; 3829 3830 for (unsigned i = 1; i != NumOps; ++i) 3831 if (!(isUndefOrEqual(Mask[i], i+NumOps) || 3832 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) || 3833 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps)))) 3834 return false; 3835 3836 return true; 3837 } 3838 3839 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3840 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP. 3841 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7> 3842 static bool isMOVSHDUPMask(ArrayRef<int> Mask, EVT VT, 3843 const X86Subtarget *Subtarget) { 3844 if (!Subtarget->hasSSE3()) 3845 return false; 3846 3847 unsigned NumElems = VT.getVectorNumElements(); 3848 3849 if ((VT.getSizeInBits() == 128 && NumElems != 4) || 3850 (VT.getSizeInBits() == 256 && NumElems != 8)) 3851 return false; 3852 3853 // "i+1" is the value the indexed mask element must have 3854 for (unsigned i = 0; i != NumElems; i += 2) 3855 if (!isUndefOrEqual(Mask[i], i+1) || 3856 !isUndefOrEqual(Mask[i+1], i+1)) 3857 return false; 3858 3859 return true; 3860 } 3861 3862 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3863 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP. 3864 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6> 3865 static bool isMOVSLDUPMask(ArrayRef<int> Mask, EVT VT, 3866 const X86Subtarget *Subtarget) { 3867 if (!Subtarget->hasSSE3()) 3868 return false; 3869 3870 unsigned NumElems = VT.getVectorNumElements(); 3871 3872 if ((VT.getSizeInBits() == 128 && NumElems != 4) || 3873 (VT.getSizeInBits() == 256 && NumElems != 8)) 3874 return false; 3875 3876 // "i" is the value the indexed mask element must have 3877 for (unsigned i = 0; i != NumElems; i += 2) 3878 if (!isUndefOrEqual(Mask[i], i) || 3879 !isUndefOrEqual(Mask[i+1], i)) 3880 return false; 3881 3882 return true; 3883 } 3884 3885 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand 3886 /// specifies a shuffle of elements that is suitable for input to 256-bit 3887 /// version of MOVDDUP. 
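/// Masks to match: <0, 0, 2, 2> (v4f64/v4i64).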
3888 static bool isMOVDDUPYMask(ArrayRef<int> Mask, EVT VT, bool HasAVX) { 3889 if (!HasAVX || !VT.is256BitVector()) 3890 return false; 3891 3892 unsigned NumElts = VT.getVectorNumElements(); 3893 if (NumElts != 4) 3894 return false; 3895 3896 for (unsigned i = 0; i != NumElts/2; ++i) 3897 if (!isUndefOrEqual(Mask[i], 0)) 3898 return false; 3899 for (unsigned i = NumElts/2; i != NumElts; ++i) 3900 if (!isUndefOrEqual(Mask[i], NumElts/2)) 3901 return false; 3902 return true; 3903 } 3904 3905 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand 3906 /// specifies a shuffle of elements that is suitable for input to 128-bit 3907 /// version of MOVDDUP. 3908 static bool isMOVDDUPMask(ArrayRef<int> Mask, EVT VT) { 3909 if (!VT.is128BitVector()) 3910 return false; 3911 3912 unsigned e = VT.getVectorNumElements() / 2; 3913 for (unsigned i = 0; i != e; ++i) 3914 if (!isUndefOrEqual(Mask[i], i)) 3915 return false; 3916 for (unsigned i = 0; i != e; ++i) 3917 if (!isUndefOrEqual(Mask[e+i], i)) 3918 return false; 3919 return true; 3920 } 3921 3922 /// isVEXTRACTF128Index - Return true if the specified 3923 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is 3924 /// suitable for input to VEXTRACTF128. 3925 bool X86::isVEXTRACTF128Index(SDNode *N) { 3926 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 3927 return false; 3928 3929 // The index should be aligned on a 128-bit boundary. 3930 uint64_t Index = 3931 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 3932 3933 unsigned VL = N->getValueType(0).getVectorNumElements(); 3934 unsigned VBits = N->getValueType(0).getSizeInBits(); 3935 unsigned ElSize = VBits / VL; 3936 bool Result = (Index * ElSize) % 128 == 0; 3937 3938 return Result; 3939 } 3940 3941 /// isVINSERTF128Index - Return true if the specified INSERT_SUBVECTOR 3942 /// operand specifies a subvector insert that is suitable for input to 3943 /// VINSERTF128. 3944 bool X86::isVINSERTF128Index(SDNode *N) { 3945 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 3946 return false; 3947 3948 // The index should be aligned on a 128-bit boundary. 3949 uint64_t Index = 3950 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 3951 3952 unsigned VL = N->getValueType(0).getVectorNumElements(); 3953 unsigned VBits = N->getValueType(0).getSizeInBits(); 3954 unsigned ElSize = VBits / VL; 3955 bool Result = (Index * ElSize) % 128 == 0; 3956 3957 return Result; 3958 } 3959 3960 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 3961 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions. 3962 /// Handles 128-bit and 256-bit. 3963 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) { 3964 EVT VT = N->getValueType(0); 3965 3966 assert((VT.is128BitVector() || VT.is256BitVector()) && 3967 "Unsupported vector type for PSHUF/SHUFP"); 3968 3969 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate 3970 // independently on 128-bit lanes. 3971 unsigned NumElts = VT.getVectorNumElements(); 3972 unsigned NumLanes = VT.getSizeInBits()/128; 3973 unsigned NumLaneElts = NumElts/NumLanes; 3974 3975 assert((NumLaneElts == 2 || NumLaneElts == 4) && 3976 "Only supports 2 or 4 elements per lane"); 3977 3978 unsigned Shift = (NumLaneElts == 4) ? 
1 : 0; 3979 unsigned Mask = 0; 3980 for (unsigned i = 0; i != NumElts; ++i) { 3981 int Elt = N->getMaskElt(i); 3982 if (Elt < 0) continue; 3983 Elt &= NumLaneElts - 1; 3984 unsigned ShAmt = (i << Shift) % 8; 3985 Mask |= Elt << ShAmt; 3986 } 3987 3988 return Mask; 3989 } 3990 3991 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 3992 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction. 3993 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) { 3994 EVT VT = N->getValueType(0); 3995 3996 assert((VT == MVT::v8i16 || VT == MVT::v16i16) && 3997 "Unsupported vector type for PSHUFHW"); 3998 3999 unsigned NumElts = VT.getVectorNumElements(); 4000 4001 unsigned Mask = 0; 4002 for (unsigned l = 0; l != NumElts; l += 8) { 4003 // 8 nodes per lane, but we only care about the last 4. 4004 for (unsigned i = 0; i < 4; ++i) { 4005 int Elt = N->getMaskElt(l+i+4); 4006 if (Elt < 0) continue; 4007 Elt &= 0x3; // only 2-bits. 4008 Mask |= Elt << (i * 2); 4009 } 4010 } 4011 4012 return Mask; 4013 } 4014 4015 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 4016 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction. 4017 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) { 4018 EVT VT = N->getValueType(0); 4019 4020 assert((VT == MVT::v8i16 || VT == MVT::v16i16) && 4021 "Unsupported vector type for PSHUFHW"); 4022 4023 unsigned NumElts = VT.getVectorNumElements(); 4024 4025 unsigned Mask = 0; 4026 for (unsigned l = 0; l != NumElts; l += 8) { 4027 // 8 nodes per lane, but we only care about the first 4. 4028 for (unsigned i = 0; i < 4; ++i) { 4029 int Elt = N->getMaskElt(l+i); 4030 if (Elt < 0) continue; 4031 Elt &= 0x3; // only 2-bits 4032 Mask |= Elt << (i * 2); 4033 } 4034 } 4035 4036 return Mask; 4037 } 4038 4039 /// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle 4040 /// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction. 4041 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) { 4042 EVT VT = SVOp->getValueType(0); 4043 unsigned EltSize = VT.getVectorElementType().getSizeInBits() >> 3; 4044 4045 unsigned NumElts = VT.getVectorNumElements(); 4046 unsigned NumLanes = VT.getSizeInBits()/128; 4047 unsigned NumLaneElts = NumElts/NumLanes; 4048 4049 int Val = 0; 4050 unsigned i; 4051 for (i = 0; i != NumElts; ++i) { 4052 Val = SVOp->getMaskElt(i); 4053 if (Val >= 0) 4054 break; 4055 } 4056 if (Val >= (int)NumElts) 4057 Val -= NumElts - NumLaneElts; 4058 4059 assert(Val - i > 0 && "PALIGNR imm should be positive"); 4060 return (Val - i) * EltSize; 4061 } 4062 4063 /// getExtractVEXTRACTF128Immediate - Return the appropriate immediate 4064 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128 4065 /// instructions. 4066 unsigned X86::getExtractVEXTRACTF128Immediate(SDNode *N) { 4067 if (!isa<ConstantSDNode>(N->getOperand(1).getNode())) 4068 llvm_unreachable("Illegal extract subvector for VEXTRACTF128"); 4069 4070 uint64_t Index = 4071 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 4072 4073 EVT VecVT = N->getOperand(0).getValueType(); 4074 EVT ElVT = VecVT.getVectorElementType(); 4075 4076 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 4077 return Index / NumElemsPerChunk; 4078 } 4079 4080 /// getInsertVINSERTF128Immediate - Return the appropriate immediate 4081 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128 4082 /// instructions. 
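/// For example, inserting a 128-bit subvector at element index 4 of a v8i32
/// gives immediate 1: there are 128/32 = 4 elements per chunk, and 4/4 = 1.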
4083 unsigned X86::getInsertVINSERTF128Immediate(SDNode *N) { 4084 if (!isa<ConstantSDNode>(N->getOperand(2).getNode())) 4085 llvm_unreachable("Illegal insert subvector for VINSERTF128"); 4086 4087 uint64_t Index = 4088 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 4089 4090 EVT VecVT = N->getValueType(0); 4091 EVT ElVT = VecVT.getVectorElementType(); 4092 4093 unsigned NumElemsPerChunk = 128 / ElVT.getSizeInBits(); 4094 return Index / NumElemsPerChunk; 4095 } 4096 4097 /// getShuffleCLImmediate - Return the appropriate immediate to shuffle 4098 /// the specified VECTOR_SHUFFLE mask with VPERMQ and VPERMPD instructions. 4099 /// Handles 256-bit. 4100 static unsigned getShuffleCLImmediate(ShuffleVectorSDNode *N) { 4101 EVT VT = N->getValueType(0); 4102 4103 unsigned NumElts = VT.getVectorNumElements(); 4104 4105 assert((VT.is256BitVector() && NumElts == 4) && 4106 "Unsupported vector type for VPERMQ/VPERMPD"); 4107 4108 unsigned Mask = 0; 4109 for (unsigned i = 0; i != NumElts; ++i) { 4110 int Elt = N->getMaskElt(i); 4111 if (Elt < 0) 4112 continue; 4113 Mask |= Elt << (i*2); 4114 } 4115 4116 return Mask; 4117 } 4118 /// isZeroNode - Returns true if Elt is a constant zero or a floating point 4119 /// constant +0.0. 4120 bool X86::isZeroNode(SDValue Elt) { 4121 return ((isa<ConstantSDNode>(Elt) && 4122 cast<ConstantSDNode>(Elt)->isNullValue()) || 4123 (isa<ConstantFPSDNode>(Elt) && 4124 cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero())); 4125 } 4126 4127 /// CommuteVectorShuffle - Swap vector_shuffle operands as well as values in 4128 /// their permute mask. 4129 static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp, 4130 SelectionDAG &DAG) { 4131 EVT VT = SVOp->getValueType(0); 4132 unsigned NumElems = VT.getVectorNumElements(); 4133 SmallVector<int, 8> MaskVec; 4134 4135 for (unsigned i = 0; i != NumElems; ++i) { 4136 int Idx = SVOp->getMaskElt(i); 4137 if (Idx >= 0) { 4138 if (Idx < (int)NumElems) 4139 Idx += NumElems; 4140 else 4141 Idx -= NumElems; 4142 } 4143 MaskVec.push_back(Idx); 4144 } 4145 return DAG.getVectorShuffle(VT, SVOp->getDebugLoc(), SVOp->getOperand(1), 4146 SVOp->getOperand(0), &MaskVec[0]); 4147 } 4148 4149 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to 4150 /// match movhlps. The lower half elements should come from upper half of 4151 /// V1 (and in order), and the upper half elements should come from the upper 4152 /// half of V2 (and in order). 4153 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, EVT VT) { 4154 if (!VT.is128BitVector()) 4155 return false; 4156 if (VT.getVectorNumElements() != 4) 4157 return false; 4158 for (unsigned i = 0, e = 2; i != e; ++i) 4159 if (!isUndefOrEqual(Mask[i], i+2)) 4160 return false; 4161 for (unsigned i = 2; i != 4; ++i) 4162 if (!isUndefOrEqual(Mask[i], i+4)) 4163 return false; 4164 return true; 4165 } 4166 4167 /// isScalarLoadToVector - Returns true if the node is a scalar load that 4168 /// is promoted to a vector. It also returns the LoadSDNode by reference if 4169 /// required. 4170 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) { 4171 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR) 4172 return false; 4173 N = N->getOperand(0).getNode(); 4174 if (!ISD::isNON_EXTLoad(N)) 4175 return false; 4176 if (LD) 4177 *LD = cast<LoadSDNode>(N); 4178 return true; 4179 } 4180 4181 // Test whether the given value is a vector value which will be legalized 4182 // into a load. 
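// For example, a BUILD_VECTOR of mixed ConstantFP operands is expected to be
// lowered to a constant-pool load, while all-zeros and all-ones vectors are
// materialized with special instructions instead.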
4183 static bool WillBeConstantPoolLoad(SDNode *N) { 4184 if (N->getOpcode() != ISD::BUILD_VECTOR) 4185 return false; 4186 4187 // Check for any non-constant elements. 4188 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) 4189 switch (N->getOperand(i).getNode()->getOpcode()) { 4190 case ISD::UNDEF: 4191 case ISD::ConstantFP: 4192 case ISD::Constant: 4193 break; 4194 default: 4195 return false; 4196 } 4197 4198 // Vectors of all-zeros and all-ones are materialized with special 4199 // instructions rather than being loaded. 4200 return !ISD::isBuildVectorAllZeros(N) && 4201 !ISD::isBuildVectorAllOnes(N); 4202 } 4203 4204 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to 4205 /// match movlp{s|d}. The lower half elements should come from lower half of 4206 /// V1 (and in order), and the upper half elements should come from the upper 4207 /// half of V2 (and in order). And since V1 will become the source of the 4208 /// MOVLP, it must be either a vector load or a scalar load to vector. 4209 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, 4210 ArrayRef<int> Mask, EVT VT) { 4211 if (!VT.is128BitVector()) 4212 return false; 4213 4214 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1)) 4215 return false; 4216 // Is V2 is a vector load, don't do this transformation. We will try to use 4217 // load folding shufps op. 4218 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2)) 4219 return false; 4220 4221 unsigned NumElems = VT.getVectorNumElements(); 4222 4223 if (NumElems != 2 && NumElems != 4) 4224 return false; 4225 for (unsigned i = 0, e = NumElems/2; i != e; ++i) 4226 if (!isUndefOrEqual(Mask[i], i)) 4227 return false; 4228 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i) 4229 if (!isUndefOrEqual(Mask[i], i+NumElems)) 4230 return false; 4231 return true; 4232 } 4233 4234 /// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are 4235 /// all the same. 4236 static bool isSplatVector(SDNode *N) { 4237 if (N->getOpcode() != ISD::BUILD_VECTOR) 4238 return false; 4239 4240 SDValue SplatValue = N->getOperand(0); 4241 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) 4242 if (N->getOperand(i) != SplatValue) 4243 return false; 4244 return true; 4245 } 4246 4247 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved 4248 /// to an zero vector. 4249 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode 4250 static bool isZeroShuffle(ShuffleVectorSDNode *N) { 4251 SDValue V1 = N->getOperand(0); 4252 SDValue V2 = N->getOperand(1); 4253 unsigned NumElems = N->getValueType(0).getVectorNumElements(); 4254 for (unsigned i = 0; i != NumElems; ++i) { 4255 int Idx = N->getMaskElt(i); 4256 if (Idx >= (int)NumElems) { 4257 unsigned Opc = V2.getOpcode(); 4258 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode())) 4259 continue; 4260 if (Opc != ISD::BUILD_VECTOR || 4261 !X86::isZeroNode(V2.getOperand(Idx-NumElems))) 4262 return false; 4263 } else if (Idx >= 0) { 4264 unsigned Opc = V1.getOpcode(); 4265 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode())) 4266 continue; 4267 if (Opc != ISD::BUILD_VECTOR || 4268 !X86::isZeroNode(V1.getOperand(Idx))) 4269 return false; 4270 } 4271 } 4272 return true; 4273 } 4274 4275 /// getZeroVector - Returns a vector of specified type with all zero elements. 
4276 /// 4277 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget, 4278 SelectionDAG &DAG, DebugLoc dl) { 4279 assert(VT.isVector() && "Expected a vector type"); 4280 unsigned Size = VT.getSizeInBits(); 4281 4282 // Always build SSE zero vectors as <4 x i32> bitcasted 4283 // to their dest type. This ensures they get CSE'd. 4284 SDValue Vec; 4285 if (Size == 128) { // SSE 4286 if (Subtarget->hasSSE2()) { // SSE2 4287 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); 4288 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4289 } else { // SSE1 4290 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 4291 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst); 4292 } 4293 } else if (Size == 256) { // AVX 4294 if (Subtarget->hasAVX2()) { // AVX2 4295 SDValue Cst = DAG.getTargetConstant(0, MVT::i32); 4296 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4297 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8); 4298 } else { 4299 // 256-bit logic and arithmetic instructions in AVX are all 4300 // floating-point, no support for integer ops. Emit fp zeroed vectors. 4301 SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); 4302 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4303 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8); 4304 } 4305 } else 4306 llvm_unreachable("Unexpected vector type"); 4307 4308 return DAG.getNode(ISD::BITCAST, dl, VT, Vec); 4309 } 4310 4311 /// getOnesVector - Returns a vector of specified type with all bits set. 4312 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with 4313 /// no AVX2 supprt, use two <4 x i32> inserted in a <8 x i32> appropriately. 4314 /// Then bitcast to their original type, ensuring they get CSE'd. 4315 static SDValue getOnesVector(EVT VT, bool HasAVX2, SelectionDAG &DAG, 4316 DebugLoc dl) { 4317 assert(VT.isVector() && "Expected a vector type"); 4318 unsigned Size = VT.getSizeInBits(); 4319 4320 SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); 4321 SDValue Vec; 4322 if (Size == 256) { 4323 if (HasAVX2) { // AVX2 4324 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; 4325 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops, 8); 4326 } else { // AVX 4327 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4328 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl); 4329 } 4330 } else if (Size == 128) { 4331 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); 4332 } else 4333 llvm_unreachable("Unexpected vector type"); 4334 4335 return DAG.getNode(ISD::BITCAST, dl, VT, Vec); 4336 } 4337 4338 /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements 4339 /// that point to V2 points to its first element. 4340 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) { 4341 for (unsigned i = 0; i != NumElems; ++i) { 4342 if (Mask[i] > (int)NumElems) { 4343 Mask[i] = NumElems; 4344 } 4345 } 4346 } 4347 4348 /// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd 4349 /// operation of specified width. 
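/// e.g. for v4i32 the emitted shuffle uses the mask <4, 1, 2, 3>.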
4350 static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4351 SDValue V2) { 4352 unsigned NumElems = VT.getVectorNumElements(); 4353 SmallVector<int, 8> Mask; 4354 Mask.push_back(NumElems); 4355 for (unsigned i = 1; i != NumElems; ++i) 4356 Mask.push_back(i); 4357 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4358 } 4359 4360 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation. 4361 static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4362 SDValue V2) { 4363 unsigned NumElems = VT.getVectorNumElements(); 4364 SmallVector<int, 8> Mask; 4365 for (unsigned i = 0, e = NumElems/2; i != e; ++i) { 4366 Mask.push_back(i); 4367 Mask.push_back(i + NumElems); 4368 } 4369 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4370 } 4371 4372 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation. 4373 static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, 4374 SDValue V2) { 4375 unsigned NumElems = VT.getVectorNumElements(); 4376 SmallVector<int, 8> Mask; 4377 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) { 4378 Mask.push_back(i + Half); 4379 Mask.push_back(i + NumElems + Half); 4380 } 4381 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); 4382 } 4383 4384 // PromoteSplati8i16 - All i16 and i8 vector types can't be used directly by 4385 // a generic shuffle instruction because the target has no such instructions. 4386 // Generate shuffles which repeat i16 and i8 several times until they can be 4387 // represented by v4f32 and then be manipulated by target suported shuffles. 4388 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) { 4389 EVT VT = V.getValueType(); 4390 int NumElems = VT.getVectorNumElements(); 4391 DebugLoc dl = V.getDebugLoc(); 4392 4393 while (NumElems > 4) { 4394 if (EltNo < NumElems/2) { 4395 V = getUnpackl(DAG, dl, VT, V, V); 4396 } else { 4397 V = getUnpackh(DAG, dl, VT, V, V); 4398 EltNo -= NumElems/2; 4399 } 4400 NumElems >>= 1; 4401 } 4402 return V; 4403 } 4404 4405 /// getLegalSplat - Generate a legal splat with supported x86 shuffles 4406 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) { 4407 EVT VT = V.getValueType(); 4408 DebugLoc dl = V.getDebugLoc(); 4409 unsigned Size = VT.getSizeInBits(); 4410 4411 if (Size == 128) { 4412 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V); 4413 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo }; 4414 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32), 4415 &SplatMask[0]); 4416 } else if (Size == 256) { 4417 // To use VPERMILPS to splat scalars, the second half of indicies must 4418 // refer to the higher part, which is a duplication of the lower one, 4419 // because VPERMILPS can only handle in-lane permutations. 4420 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo, 4421 EltNo+4, EltNo+4, EltNo+4, EltNo+4 }; 4422 4423 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V); 4424 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32), 4425 &SplatMask[0]); 4426 } else 4427 llvm_unreachable("Vector size not supported"); 4428 4429 return DAG.getNode(ISD::BITCAST, dl, VT, V); 4430 } 4431 4432 /// PromoteSplat - Splat is promoted to target supported vector shuffles. 
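/// For 256-bit types the splat lane is first extracted to 128 bits, splatted
/// there, and re-concatenated so the final shuffle can be expressed with
/// in-lane VPERMILPS-style masks (see getLegalSplat).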
static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
  EVT SrcVT = SV->getValueType(0);
  SDValue V1 = SV->getOperand(0);
  DebugLoc dl = SV->getDebugLoc();

  int EltNo = SV->getSplatIndex();
  int NumElems = SrcVT.getVectorNumElements();
  unsigned Size = SrcVT.getSizeInBits();

  assert(((Size == 128 && NumElems > 4) || Size == 256) &&
         "Unknown how to promote splat for type");

  // Extract the 128-bit part containing the splat element and update
  // the splat element index when it refers to the higher register.
  if (Size == 256) {
    V1 = Extract128BitVector(V1, EltNo, DAG, dl);
    if (EltNo >= NumElems/2)
      EltNo -= NumElems/2;
  }

  // i16 and i8 vector types can't be used directly by a generic shuffle
  // instruction because the target has no such instruction. Generate shuffles
  // which repeat i16 and i8 several times until they fit in i32, and then can
  // be manipulated by target supported shuffles.
  EVT EltVT = SrcVT.getVectorElementType();
  if (EltVT == MVT::i8 || EltVT == MVT::i16)
    V1 = PromoteSplati8i16(V1, DAG, EltNo);

  // Recreate the 256-bit vector and place the same 128-bit vector
  // into the low and high parts. This is necessary because we want
  // to use VPERM* to shuffle the vectors.
  if (Size == 256) {
    V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
  }

  return getLegalSplat(DAG, V1, EltNo);
}

/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
/// vector against a zero or undef vector. This produces a shuffle where the
/// low element of V2 is swizzled into the zero/undef vector, landing at
/// element Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or
/// 0,1,2,4 (idx=3).
static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
                                           bool IsZero,
                                           const X86Subtarget *Subtarget,
                                           SelectionDAG &DAG) {
  EVT VT = V2.getValueType();
  SDValue V1 = IsZero
    ? getZeroVector(VT, Subtarget, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT);
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 16> MaskVec;
  for (unsigned i = 0; i != NumElems; ++i)
    // If this is the insertion idx, put the low elt of V2 here.
    MaskVec.push_back(i == Idx ? NumElems : i);
  return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]);
}

/// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
/// target specific opcode. Returns true if the Mask could be calculated.
/// Sets IsUnary to true if the shuffle uses only one source.
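/// For example, an X86ISD::UNPCKL node of type v4i32 produces the mask
/// <0, 4, 1, 5>.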
4493 static bool getTargetShuffleMask(SDNode *N, MVT VT, 4494 SmallVectorImpl<int> &Mask, bool &IsUnary) { 4495 unsigned NumElems = VT.getVectorNumElements(); 4496 SDValue ImmN; 4497 4498 IsUnary = false; 4499 switch(N->getOpcode()) { 4500 case X86ISD::SHUFP: 4501 ImmN = N->getOperand(N->getNumOperands()-1); 4502 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4503 break; 4504 case X86ISD::UNPCKH: 4505 DecodeUNPCKHMask(VT, Mask); 4506 break; 4507 case X86ISD::UNPCKL: 4508 DecodeUNPCKLMask(VT, Mask); 4509 break; 4510 case X86ISD::MOVHLPS: 4511 DecodeMOVHLPSMask(NumElems, Mask); 4512 break; 4513 case X86ISD::MOVLHPS: 4514 DecodeMOVLHPSMask(NumElems, Mask); 4515 break; 4516 case X86ISD::PSHUFD: 4517 case X86ISD::VPERMILP: 4518 ImmN = N->getOperand(N->getNumOperands()-1); 4519 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4520 IsUnary = true; 4521 break; 4522 case X86ISD::PSHUFHW: 4523 ImmN = N->getOperand(N->getNumOperands()-1); 4524 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4525 IsUnary = true; 4526 break; 4527 case X86ISD::PSHUFLW: 4528 ImmN = N->getOperand(N->getNumOperands()-1); 4529 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4530 IsUnary = true; 4531 break; 4532 case X86ISD::VPERMI: 4533 ImmN = N->getOperand(N->getNumOperands()-1); 4534 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4535 IsUnary = true; 4536 break; 4537 case X86ISD::MOVSS: 4538 case X86ISD::MOVSD: { 4539 // The index 0 always comes from the first element of the second source, 4540 // this is why MOVSS and MOVSD are used in the first place. The other 4541 // elements come from the other positions of the first source vector 4542 Mask.push_back(NumElems); 4543 for (unsigned i = 1; i != NumElems; ++i) { 4544 Mask.push_back(i); 4545 } 4546 break; 4547 } 4548 case X86ISD::VPERM2X128: 4549 ImmN = N->getOperand(N->getNumOperands()-1); 4550 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); 4551 if (Mask.empty()) return false; 4552 break; 4553 case X86ISD::MOVDDUP: 4554 case X86ISD::MOVLHPD: 4555 case X86ISD::MOVLPD: 4556 case X86ISD::MOVLPS: 4557 case X86ISD::MOVSHDUP: 4558 case X86ISD::MOVSLDUP: 4559 case X86ISD::PALIGN: 4560 // Not yet implemented 4561 return false; 4562 default: llvm_unreachable("unknown target shuffle node"); 4563 } 4564 4565 return true; 4566 } 4567 4568 /// getShuffleScalarElt - Returns the scalar element that will make up the ith 4569 /// element of the result of the vector shuffle. 4570 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG, 4571 unsigned Depth) { 4572 if (Depth == 6) 4573 return SDValue(); // Limit search depth. 4574 4575 SDValue V = SDValue(N, 0); 4576 EVT VT = V.getValueType(); 4577 unsigned Opcode = V.getOpcode(); 4578 4579 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars. 4580 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) { 4581 int Elt = SV->getMaskElt(Index); 4582 4583 if (Elt < 0) 4584 return DAG.getUNDEF(VT.getVectorElementType()); 4585 4586 unsigned NumElems = VT.getVectorNumElements(); 4587 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0) 4588 : SV->getOperand(1); 4589 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1); 4590 } 4591 4592 // Recurse into target specific vector shuffles to find scalars. 
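  // For example (values picked for illustration), with an X86ISD::MOVSS node
  // whose decoded mask is <4, 1, 2, 3>, Index 0 recurses into element 0 of
  // operand 1, while Index 1..3 recurse into the corresponding elements of
  // operand 0. Mask entries >= NumElems select the second operand, and the
  // element index is re-normalized below with Elt % NumElems.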
4593   if (isTargetShuffle(Opcode)) {
4594     MVT ShufVT = V.getValueType().getSimpleVT();
4595     unsigned NumElems = ShufVT.getVectorNumElements();
4596     SmallVector<int, 16> ShuffleMask;
4597     SDValue ImmN;
4598     bool IsUnary;
4599 
4600     if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
4601       return SDValue();
4602 
4603     int Elt = ShuffleMask[Index];
4604     if (Elt < 0)
4605       return DAG.getUNDEF(ShufVT.getVectorElementType());
4606 
4607     SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
4608                                          : N->getOperand(1);
4609     return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
4610                                Depth+1);
4611   }
4612 
4613   // Actual nodes that may contain scalar elements
4614   if (Opcode == ISD::BITCAST) {
4615     V = V.getOperand(0);
4616     EVT SrcVT = V.getValueType();
4617     unsigned NumElems = VT.getVectorNumElements();
4618 
4619     if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
4620       return SDValue();
4621   }
4622 
4623   if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
4624     return (Index == 0) ? V.getOperand(0)
4625                         : DAG.getUNDEF(VT.getVectorElementType());
4626 
4627   if (V.getOpcode() == ISD::BUILD_VECTOR)
4628     return V.getOperand(Index);
4629 
4630   return SDValue();
4631 }
4632 
4633 /// getNumOfConsecutiveZeros - Return the number of elements of a vector
4634 /// shuffle operation which come consecutively from a zero (a zero constant or
4635 /// undef). The search can start in two different directions, from left or right.
4636 static
4637 unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp, unsigned NumElems,
4638                                   bool ZerosFromLeft, SelectionDAG &DAG) {
4639   unsigned i;
4640   for (i = 0; i != NumElems; ++i) {
4641     unsigned Index = ZerosFromLeft ? i : NumElems-i-1;
4642     SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
4643     if (!(Elt.getNode() &&
4644          (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt))))
4645       break;
4646   }
4647 
4648   return i;
4649 }
4650 
4651 /// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
4652 /// correspond consecutively to elements from one of the vector operands,
4653 /// starting from its index OpIdx. Also set OpNum to the source operand used.
4654 static
4655 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
4656                               unsigned MaskI, unsigned MaskE, unsigned OpIdx,
4657                               unsigned NumElems, unsigned &OpNum) {
4658   bool SeenV1 = false;
4659   bool SeenV2 = false;
4660 
4661   for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
4662     int Idx = SVOp->getMaskElt(i);
4663     // Ignore undef indices
4664     if (Idx < 0)
4665       continue;
4666 
4667     if (Idx < (int)NumElems)
4668       SeenV1 = true;
4669     else
4670       SeenV2 = true;
4671 
4672     // Only accept consecutive elements from the same vector
4673     if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
4674       return false;
4675   }
4676 
4677   OpNum = SeenV1 ? 0 : 1;
4678   return true;
4679 }
4680 
4681 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
4682 /// logical right shift of a vector.
4683 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
4684                                bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
4685   unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
4686   unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems,
4687               false /* check zeros from right */, DAG);
4688   unsigned OpSrc;
4689 
4690   if (!NumZeros)
4691     return false;
4692 
4693   // Considering the elements in the mask that are not consecutive zeros,
4694   // check if they consecutively come from only one of the source vectors.
4695 // 4696 // V1 = {X, A, B, C} 0 4697 // \ \ \ / 4698 // vector_shuffle V1, V2 <1, 2, 3, X> 4699 // 4700 if (!isShuffleMaskConsecutive(SVOp, 4701 0, // Mask Start Index 4702 NumElems-NumZeros, // Mask End Index(exclusive) 4703 NumZeros, // Where to start looking in the src vector 4704 NumElems, // Number of elements in vector 4705 OpSrc)) // Which source operand ? 4706 return false; 4707 4708 isLeft = false; 4709 ShAmt = NumZeros; 4710 ShVal = SVOp->getOperand(OpSrc); 4711 return true; 4712 } 4713 4714 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a 4715 /// logical left shift of a vector. 4716 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 4717 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 4718 unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); 4719 unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, 4720 true /* check zeros from left */, DAG); 4721 unsigned OpSrc; 4722 4723 if (!NumZeros) 4724 return false; 4725 4726 // Considering the elements in the mask that are not consecutive zeros, 4727 // check if they consecutively come from only one of the source vectors. 4728 // 4729 // 0 { A, B, X, X } = V2 4730 // / \ / / 4731 // vector_shuffle V1, V2 <X, X, 4, 5> 4732 // 4733 if (!isShuffleMaskConsecutive(SVOp, 4734 NumZeros, // Mask Start Index 4735 NumElems, // Mask End Index(exclusive) 4736 0, // Where to start looking in the src vector 4737 NumElems, // Number of elements in vector 4738 OpSrc)) // Which source operand ? 4739 return false; 4740 4741 isLeft = true; 4742 ShAmt = NumZeros; 4743 ShVal = SVOp->getOperand(OpSrc); 4744 return true; 4745 } 4746 4747 /// isVectorShift - Returns true if the shuffle can be implemented as a 4748 /// logical left or right shift of a vector. 4749 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, 4750 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { 4751 // Although the logic below support any bitwidth size, there are no 4752 // shift instructions which handle more than 128-bit vectors. 4753 if (!SVOp->getValueType(0).is128BitVector()) 4754 return false; 4755 4756 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) || 4757 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt)) 4758 return true; 4759 4760 return false; 4761 } 4762 4763 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 
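/// The strategy used below: consecutive i8 elements are packed in pairs into
/// the i16 lanes of a v8i16. When both bytes of a pair are non-zero, lane i/2
/// is built as (zext(Op[i]) << 8) | zext(Op[i-1]) for odd i, and the pair is
/// inserted with INSERT_VECTOR_ELT; the v8i16 is bitcast back to v16i8 at the
/// end. (Sketch only; the loop below also handles pairs where one or both
/// bytes are zero or undef.)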
4764 /// 4765 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, 4766 unsigned NumNonZero, unsigned NumZero, 4767 SelectionDAG &DAG, 4768 const X86Subtarget* Subtarget, 4769 const TargetLowering &TLI) { 4770 if (NumNonZero > 8) 4771 return SDValue(); 4772 4773 DebugLoc dl = Op.getDebugLoc(); 4774 SDValue V(0, 0); 4775 bool First = true; 4776 for (unsigned i = 0; i < 16; ++i) { 4777 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; 4778 if (ThisIsNonZero && First) { 4779 if (NumZero) 4780 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl); 4781 else 4782 V = DAG.getUNDEF(MVT::v8i16); 4783 First = false; 4784 } 4785 4786 if ((i & 1) != 0) { 4787 SDValue ThisElt(0, 0), LastElt(0, 0); 4788 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; 4789 if (LastIsNonZero) { 4790 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl, 4791 MVT::i16, Op.getOperand(i-1)); 4792 } 4793 if (ThisIsNonZero) { 4794 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i)); 4795 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16, 4796 ThisElt, DAG.getConstant(8, MVT::i8)); 4797 if (LastIsNonZero) 4798 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt); 4799 } else 4800 ThisElt = LastElt; 4801 4802 if (ThisElt.getNode()) 4803 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt, 4804 DAG.getIntPtrConstant(i/2)); 4805 } 4806 } 4807 4808 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V); 4809 } 4810 4811 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. 4812 /// 4813 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, 4814 unsigned NumNonZero, unsigned NumZero, 4815 SelectionDAG &DAG, 4816 const X86Subtarget* Subtarget, 4817 const TargetLowering &TLI) { 4818 if (NumNonZero > 4) 4819 return SDValue(); 4820 4821 DebugLoc dl = Op.getDebugLoc(); 4822 SDValue V(0, 0); 4823 bool First = true; 4824 for (unsigned i = 0; i < 8; ++i) { 4825 bool isNonZero = (NonZeros & (1 << i)) != 0; 4826 if (isNonZero) { 4827 if (First) { 4828 if (NumZero) 4829 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl); 4830 else 4831 V = DAG.getUNDEF(MVT::v8i16); 4832 First = false; 4833 } 4834 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, 4835 MVT::v8i16, V, Op.getOperand(i), 4836 DAG.getIntPtrConstant(i)); 4837 } 4838 } 4839 4840 return V; 4841 } 4842 4843 /// getVShift - Return a vector logical shift node. 4844 /// 4845 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, 4846 unsigned NumBits, SelectionDAG &DAG, 4847 const TargetLowering &TLI, DebugLoc dl) { 4848 assert(VT.is128BitVector() && "Unknown type for VShift"); 4849 EVT ShVT = MVT::v2i64; 4850 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ; 4851 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp); 4852 return DAG.getNode(ISD::BITCAST, dl, VT, 4853 DAG.getNode(Opc, dl, ShVT, SrcOp, 4854 DAG.getConstant(NumBits, 4855 TLI.getShiftAmountTy(SrcOp.getValueType())))); 4856 } 4857 4858 SDValue 4859 X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, 4860 SelectionDAG &DAG) const { 4861 4862 // Check if the scalar load can be widened into a vector load. And if 4863 // the address is "base + cst" see if the cst can be "absorbed" into 4864 // the shuffle mask. 
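  // Illustrative example (offsets invented for this comment): a 4-byte scalar
  // load from <stack slot + 8> that is being splatted into a v4f32 can instead
  // load the whole 16-byte slot at offset 0 and splat element (8 - 0) / 4 == 2,
  // i.e. use the shuffle mask <2, 2, 2, 2>. The code below performs exactly
  // this rewrite, after making sure the stack object is sufficiently aligned.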
4865 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) { 4866 SDValue Ptr = LD->getBasePtr(); 4867 if (!ISD::isNormalLoad(LD) || LD->isVolatile()) 4868 return SDValue(); 4869 EVT PVT = LD->getValueType(0); 4870 if (PVT != MVT::i32 && PVT != MVT::f32) 4871 return SDValue(); 4872 4873 int FI = -1; 4874 int64_t Offset = 0; 4875 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) { 4876 FI = FINode->getIndex(); 4877 Offset = 0; 4878 } else if (DAG.isBaseWithConstantOffset(Ptr) && 4879 isa<FrameIndexSDNode>(Ptr.getOperand(0))) { 4880 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 4881 Offset = Ptr.getConstantOperandVal(1); 4882 Ptr = Ptr.getOperand(0); 4883 } else { 4884 return SDValue(); 4885 } 4886 4887 // FIXME: 256-bit vector instructions don't require a strict alignment, 4888 // improve this code to support it better. 4889 unsigned RequiredAlign = VT.getSizeInBits()/8; 4890 SDValue Chain = LD->getChain(); 4891 // Make sure the stack object alignment is at least 16 or 32. 4892 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 4893 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) { 4894 if (MFI->isFixedObjectIndex(FI)) { 4895 // Can't change the alignment. FIXME: It's possible to compute 4896 // the exact stack offset and reference FI + adjust offset instead. 4897 // If someone *really* cares about this. That's the way to implement it. 4898 return SDValue(); 4899 } else { 4900 MFI->setObjectAlignment(FI, RequiredAlign); 4901 } 4902 } 4903 4904 // (Offset % 16 or 32) must be multiple of 4. Then address is then 4905 // Ptr + (Offset & ~15). 4906 if (Offset < 0) 4907 return SDValue(); 4908 if ((Offset % RequiredAlign) & 3) 4909 return SDValue(); 4910 int64_t StartOffset = Offset & ~(RequiredAlign-1); 4911 if (StartOffset) 4912 Ptr = DAG.getNode(ISD::ADD, Ptr.getDebugLoc(), Ptr.getValueType(), 4913 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType())); 4914 4915 int EltNo = (Offset - StartOffset) >> 2; 4916 unsigned NumElems = VT.getVectorNumElements(); 4917 4918 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems); 4919 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr, 4920 LD->getPointerInfo().getWithOffset(StartOffset), 4921 false, false, false, 0); 4922 4923 SmallVector<int, 8> Mask; 4924 for (unsigned i = 0; i != NumElems; ++i) 4925 Mask.push_back(EltNo); 4926 4927 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]); 4928 } 4929 4930 return SDValue(); 4931 } 4932 4933 /// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a 4934 /// vector of type 'VT', see if the elements can be replaced by a single large 4935 /// load which has the same value as a build_vector whose operands are 'elts'. 4936 /// 4937 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a 4938 /// 4939 /// FIXME: we'd also like to handle the case where the last elements are zero 4940 /// rather than undef via VZEXT_LOAD, but we do not detect that case today. 4941 /// There's even a handy isZeroNode for that purpose. 4942 static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts, 4943 DebugLoc &DL, SelectionDAG &DAG) { 4944 EVT EltVT = VT.getVectorElementType(); 4945 unsigned NumElems = Elts.size(); 4946 4947 LoadSDNode *LDBase = NULL; 4948 unsigned LastLoadedElt = -1U; 4949 4950 // For each element in the initializer, see if we've found a load or an undef. 4951 // If we don't find an initial load element, or later load elements are 4952 // non-consecutive, bail out. 
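  // Two shapes this routine handles (types are illustrative only):
  //
  //   <(load a), (load a+4), (load a+8), (load a+12)>  -->  one wide v4i32 load
  //   <(load a), (load a+4), undef, undef>             -->  X86ISD::VZEXT_LOAD
  //
  // The first form needs element 0 to be a load and every later element to be
  // undef or a consecutive load; the second fires when only the low half was
  // loaded (the NumElems == 4 && LastLoadedElt == 1 case below).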
4953 for (unsigned i = 0; i < NumElems; ++i) { 4954 SDValue Elt = Elts[i]; 4955 4956 if (!Elt.getNode() || 4957 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode()))) 4958 return SDValue(); 4959 if (!LDBase) { 4960 if (Elt.getNode()->getOpcode() == ISD::UNDEF) 4961 return SDValue(); 4962 LDBase = cast<LoadSDNode>(Elt.getNode()); 4963 LastLoadedElt = i; 4964 continue; 4965 } 4966 if (Elt.getOpcode() == ISD::UNDEF) 4967 continue; 4968 4969 LoadSDNode *LD = cast<LoadSDNode>(Elt); 4970 if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i)) 4971 return SDValue(); 4972 LastLoadedElt = i; 4973 } 4974 4975 // If we have found an entire vector of loads and undefs, then return a large 4976 // load of the entire vector width starting at the base pointer. If we found 4977 // consecutive loads for the low half, generate a vzext_load node. 4978 if (LastLoadedElt == NumElems - 1) { 4979 if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16) 4980 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 4981 LDBase->getPointerInfo(), 4982 LDBase->isVolatile(), LDBase->isNonTemporal(), 4983 LDBase->isInvariant(), 0); 4984 return DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), 4985 LDBase->getPointerInfo(), 4986 LDBase->isVolatile(), LDBase->isNonTemporal(), 4987 LDBase->isInvariant(), LDBase->getAlignment()); 4988 } 4989 if (NumElems == 4 && LastLoadedElt == 1 && 4990 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) { 4991 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other); 4992 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() }; 4993 SDValue ResNode = 4994 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, 2, MVT::i64, 4995 LDBase->getPointerInfo(), 4996 LDBase->getAlignment(), 4997 false/*isVolatile*/, true/*ReadMem*/, 4998 false/*WriteMem*/); 4999 5000 // Make sure the newly-created LOAD is in the same position as LDBase in 5001 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and 5002 // update uses of LDBase's output chain to use the TokenFactor. 5003 if (LDBase->hasAnyUseOfValue(1)) { 5004 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 5005 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1)); 5006 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain); 5007 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1), 5008 SDValue(ResNode.getNode(), 1)); 5009 } 5010 5011 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode); 5012 } 5013 return SDValue(); 5014 } 5015 5016 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction 5017 /// to generate a splat value for the following cases: 5018 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant. 5019 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from 5020 /// a scalar load, or a constant. 5021 /// The VBROADCAST node is returned when a pattern is found, 5022 /// or SDValue() otherwise. 5023 SDValue 5024 X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const { 5025 if (!Subtarget->hasAVX()) 5026 return SDValue(); 5027 5028 EVT VT = Op.getValueType(); 5029 DebugLoc dl = Op.getDebugLoc(); 5030 5031 assert((VT.is128BitVector() || VT.is256BitVector()) && 5032 "Unsupported vector type for broadcast."); 5033 5034 SDValue Ld; 5035 bool ConstSplatVal; 5036 5037 switch (Op.getOpcode()) { 5038 default: 5039 // Unknown pattern found. 5040 return SDValue(); 5041 5042 case ISD::BUILD_VECTOR: { 5043 // The BUILD_VECTOR node must be a splat. 
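    // That is, something of the form (build_vector X, X, ..., X) where X is
    // either a load or a constant. As a hypothetical example, a v4f32
    // build_vector whose operands are all the same (load (f32* %p)) is
    // accepted here and becomes (X86ISD::VBROADCAST (load %p)) further down,
    // which the AVX patterns can select as vbroadcastss.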
5044 if (!isSplatVector(Op.getNode())) 5045 return SDValue(); 5046 5047 Ld = Op.getOperand(0); 5048 ConstSplatVal = (Ld.getOpcode() == ISD::Constant || 5049 Ld.getOpcode() == ISD::ConstantFP); 5050 5051 // The suspected load node has several users. Make sure that all 5052 // of its users are from the BUILD_VECTOR node. 5053 // Constants may have multiple users. 5054 if (!ConstSplatVal && !Ld->hasNUsesOfValue(VT.getVectorNumElements(), 0)) 5055 return SDValue(); 5056 break; 5057 } 5058 5059 case ISD::VECTOR_SHUFFLE: { 5060 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5061 5062 // Shuffles must have a splat mask where the first element is 5063 // broadcasted. 5064 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0) 5065 return SDValue(); 5066 5067 SDValue Sc = Op.getOperand(0); 5068 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR && 5069 Sc.getOpcode() != ISD::BUILD_VECTOR) { 5070 5071 if (!Subtarget->hasAVX2()) 5072 return SDValue(); 5073 5074 // Use the register form of the broadcast instruction available on AVX2. 5075 if (VT.is256BitVector()) 5076 Sc = Extract128BitVector(Sc, 0, DAG, dl); 5077 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc); 5078 } 5079 5080 Ld = Sc.getOperand(0); 5081 ConstSplatVal = (Ld.getOpcode() == ISD::Constant || 5082 Ld.getOpcode() == ISD::ConstantFP); 5083 5084 // The scalar_to_vector node and the suspected 5085 // load node must have exactly one user. 5086 // Constants may have multiple users. 5087 if (!ConstSplatVal && (!Sc.hasOneUse() || !Ld.hasOneUse())) 5088 return SDValue(); 5089 break; 5090 } 5091 } 5092 5093 bool Is256 = VT.is256BitVector(); 5094 5095 // Handle the broadcasting a single constant scalar from the constant pool 5096 // into a vector. On Sandybridge it is still better to load a constant vector 5097 // from the constant pool and not to broadcast it from a scalar. 5098 if (ConstSplatVal && Subtarget->hasAVX2()) { 5099 EVT CVT = Ld.getValueType(); 5100 assert(!CVT.isVector() && "Must not broadcast a vector type"); 5101 unsigned ScalarSize = CVT.getSizeInBits(); 5102 5103 if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) { 5104 const Constant *C = 0; 5105 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld)) 5106 C = CI->getConstantIntValue(); 5107 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld)) 5108 C = CF->getConstantFPValue(); 5109 5110 assert(C && "Invalid constant type"); 5111 5112 SDValue CP = DAG.getConstantPool(C, getPointerTy()); 5113 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment(); 5114 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP, 5115 MachinePointerInfo::getConstantPool(), 5116 false, false, false, Alignment); 5117 5118 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5119 } 5120 } 5121 5122 bool IsLoad = ISD::isNormalLoad(Ld.getNode()); 5123 unsigned ScalarSize = Ld.getValueType().getSizeInBits(); 5124 5125 // Handle AVX2 in-register broadcasts. 5126 if (!IsLoad && Subtarget->hasAVX2() && 5127 (ScalarSize == 32 || (Is256 && ScalarSize == 64))) 5128 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5129 5130 // The scalar source must be a normal load. 
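  // Summary of the load cases below (sizes refer to the scalar element):
  //  - 32-bit scalars, and 64-bit scalars for 256-bit result vectors, are
  //    broadcast directly from the load (vbroadcastss / vbroadcastsd ymm);
  //  - with AVX2, integer scalars of 8, 16 or 64 bits are also accepted; the
  //    64-bit case is limited to integers because there is no vbroadcastsd
  //    xmm form for the 128-bit double case.
  // Anything else falls through and no broadcast is formed.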
5131 if (!IsLoad) 5132 return SDValue(); 5133 5134 if (ScalarSize == 32 || (Is256 && ScalarSize == 64)) 5135 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5136 5137 // The integer check is needed for the 64-bit into 128-bit so it doesn't match 5138 // double since there is no vbroadcastsd xmm 5139 if (Subtarget->hasAVX2() && Ld.getValueType().isInteger()) { 5140 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64) 5141 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); 5142 } 5143 5144 // Unsupported broadcast. 5145 return SDValue(); 5146 } 5147 5148 // LowerVectorFpExtend - Recognize the scalarized FP_EXTEND from v2f32 to v2f64 5149 // and convert it into X86ISD::VFPEXT due to the current ISD::FP_EXTEND has the 5150 // constraint of matching input/output vector elements. 5151 SDValue 5152 X86TargetLowering::LowerVectorFpExtend(SDValue &Op, SelectionDAG &DAG) const { 5153 DebugLoc DL = Op.getDebugLoc(); 5154 SDNode *N = Op.getNode(); 5155 EVT VT = Op.getValueType(); 5156 unsigned NumElts = Op.getNumOperands(); 5157 5158 // Check supported types and sub-targets. 5159 // 5160 // Only v2f32 -> v2f64 needs special handling. 5161 if (VT != MVT::v2f64 || !Subtarget->hasSSE2()) 5162 return SDValue(); 5163 5164 SDValue VecIn; 5165 EVT VecInVT; 5166 SmallVector<int, 8> Mask; 5167 EVT SrcVT = MVT::Other; 5168 5169 // Check the patterns could be translated into X86vfpext. 5170 for (unsigned i = 0; i < NumElts; ++i) { 5171 SDValue In = N->getOperand(i); 5172 unsigned Opcode = In.getOpcode(); 5173 5174 // Skip if the element is undefined. 5175 if (Opcode == ISD::UNDEF) { 5176 Mask.push_back(-1); 5177 continue; 5178 } 5179 5180 // Quit if one of the elements is not defined from 'fpext'. 5181 if (Opcode != ISD::FP_EXTEND) 5182 return SDValue(); 5183 5184 // Check how the source of 'fpext' is defined. 5185 SDValue L2In = In.getOperand(0); 5186 EVT L2InVT = L2In.getValueType(); 5187 5188 // Check the original type 5189 if (SrcVT == MVT::Other) 5190 SrcVT = L2InVT; 5191 else if (SrcVT != L2InVT) // Quit if non-homogenous typed. 5192 return SDValue(); 5193 5194 // Check whether the value being 'fpext'ed is extracted from the same 5195 // source. 5196 Opcode = L2In.getOpcode(); 5197 5198 // Quit if it's not extracted with a constant index. 5199 if (Opcode != ISD::EXTRACT_VECTOR_ELT || 5200 !isa<ConstantSDNode>(L2In.getOperand(1))) 5201 return SDValue(); 5202 5203 SDValue ExtractedFromVec = L2In.getOperand(0); 5204 5205 if (VecIn.getNode() == 0) { 5206 VecIn = ExtractedFromVec; 5207 VecInVT = ExtractedFromVec.getValueType(); 5208 } else if (VecIn != ExtractedFromVec) // Quit if built from more than 1 vec. 5209 return SDValue(); 5210 5211 Mask.push_back(cast<ConstantSDNode>(L2In.getOperand(1))->getZExtValue()); 5212 } 5213 5214 // Quit if all operands of BUILD_VECTOR are undefined. 5215 if (!VecIn.getNode()) 5216 return SDValue(); 5217 5218 // Fill the remaining mask as undef. 
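  // (The source vector may be wider than the lanes actually extracted.) As an
  // illustration, a v2f64 build_vector of
  //   (fpext (extractelement %v4f32, 0)), (fpext (extractelement %v4f32, 1))
  // becomes (X86ISD::VFPEXT (vector_shuffle %v4f32, undef, <0, 1, -1, -1>)),
  // which can then be selected as cvtps2pd.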
5219 for (unsigned i = NumElts; i < VecInVT.getVectorNumElements(); ++i) 5220 Mask.push_back(-1); 5221 5222 return DAG.getNode(X86ISD::VFPEXT, DL, VT, 5223 DAG.getVectorShuffle(VecInVT, DL, 5224 VecIn, DAG.getUNDEF(VecInVT), 5225 &Mask[0])); 5226 } 5227 5228 SDValue 5229 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { 5230 DebugLoc dl = Op.getDebugLoc(); 5231 5232 EVT VT = Op.getValueType(); 5233 EVT ExtVT = VT.getVectorElementType(); 5234 unsigned NumElems = Op.getNumOperands(); 5235 5236 // Vectors containing all zeros can be matched by pxor and xorps later 5237 if (ISD::isBuildVectorAllZeros(Op.getNode())) { 5238 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd 5239 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts. 5240 if (VT == MVT::v4i32 || VT == MVT::v8i32) 5241 return Op; 5242 5243 return getZeroVector(VT, Subtarget, DAG, dl); 5244 } 5245 5246 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width 5247 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use 5248 // vpcmpeqd on 256-bit vectors. 5249 if (ISD::isBuildVectorAllOnes(Op.getNode())) { 5250 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasAVX2())) 5251 return Op; 5252 5253 return getOnesVector(VT, Subtarget->hasAVX2(), DAG, dl); 5254 } 5255 5256 SDValue Broadcast = LowerVectorBroadcast(Op, DAG); 5257 if (Broadcast.getNode()) 5258 return Broadcast; 5259 5260 SDValue FpExt = LowerVectorFpExtend(Op, DAG); 5261 if (FpExt.getNode()) 5262 return FpExt; 5263 5264 unsigned EVTBits = ExtVT.getSizeInBits(); 5265 5266 unsigned NumZero = 0; 5267 unsigned NumNonZero = 0; 5268 unsigned NonZeros = 0; 5269 bool IsAllConstants = true; 5270 SmallSet<SDValue, 8> Values; 5271 for (unsigned i = 0; i < NumElems; ++i) { 5272 SDValue Elt = Op.getOperand(i); 5273 if (Elt.getOpcode() == ISD::UNDEF) 5274 continue; 5275 Values.insert(Elt); 5276 if (Elt.getOpcode() != ISD::Constant && 5277 Elt.getOpcode() != ISD::ConstantFP) 5278 IsAllConstants = false; 5279 if (X86::isZeroNode(Elt)) 5280 NumZero++; 5281 else { 5282 NonZeros |= (1 << i); 5283 NumNonZero++; 5284 } 5285 } 5286 5287 // All undef vector. Return an UNDEF. All zero vectors were handled above. 5288 if (NumNonZero == 0) 5289 return DAG.getUNDEF(VT); 5290 5291 // Special case for single non-zero, non-undef, element. 5292 if (NumNonZero == 1) { 5293 unsigned Idx = CountTrailingZeros_32(NonZeros); 5294 SDValue Item = Op.getOperand(Idx); 5295 5296 // If this is an insertion of an i64 value on x86-32, and if the top bits of 5297 // the value are obviously zero, truncate the value to i32 and do the 5298 // insertion that way. Only do this if the value is non-constant or if the 5299 // value is a constant being inserted into element 0. It is cheaper to do 5300 // a constant pool load than it is to do a movd + shuffle. 5301 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() && 5302 (!IsAllConstants || Idx == 0)) { 5303 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) { 5304 // Handle SSE only. 5305 assert(VT == MVT::v2i64 && "Expected an SSE value type!"); 5306 EVT VecVT = MVT::v4i32; 5307 unsigned VecElts = 4; 5308 5309 // Truncate the value (which may itself be a constant) to i32, and 5310 // convert it to a vector with movd (S2V+shuffle to zero extend). 
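        // A sketch of the nodes built below, for a hypothetical i64 value X
        // with a known-zero upper half being placed into a v2i64:
        //   t = (trunc X to i32)                       ; lossless here
        //   v = (scalar_to_vector t)                   ; v4i32, matched as movd
        //   v = shuffle zerovec, v, <4, 1, 2, 3>       ; zero the other lanes
        // with the result bitcast back to v2i64 at the end of this block.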
5311 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item); 5312 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item); 5313 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5314 5315 // Now we have our 32-bit value zero extended in the low element of 5316 // a vector. If Idx != 0, swizzle it into place. 5317 if (Idx != 0) { 5318 SmallVector<int, 4> Mask; 5319 Mask.push_back(Idx); 5320 for (unsigned i = 1; i != VecElts; ++i) 5321 Mask.push_back(i); 5322 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT), 5323 &Mask[0]); 5324 } 5325 return DAG.getNode(ISD::BITCAST, dl, VT, Item); 5326 } 5327 } 5328 5329 // If we have a constant or non-constant insertion into the low element of 5330 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into 5331 // the rest of the elements. This will be matched as movd/movq/movss/movsd 5332 // depending on what the source datatype is. 5333 if (Idx == 0) { 5334 if (NumZero == 0) 5335 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5336 5337 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 || 5338 (ExtVT == MVT::i64 && Subtarget->is64Bit())) { 5339 if (VT.is256BitVector()) { 5340 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl); 5341 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec, 5342 Item, DAG.getIntPtrConstant(0)); 5343 } 5344 assert(VT.is128BitVector() && "Expected an SSE value type!"); 5345 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5346 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. 5347 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5348 } 5349 5350 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) { 5351 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item); 5352 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item); 5353 if (VT.is256BitVector()) { 5354 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl); 5355 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl); 5356 } else { 5357 assert(VT.is128BitVector() && "Expected an SSE value type!"); 5358 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); 5359 } 5360 return DAG.getNode(ISD::BITCAST, dl, VT, Item); 5361 } 5362 } 5363 5364 // Is it a vector logical left shift? 5365 if (NumElems == 2 && Idx == 1 && 5366 X86::isZeroNode(Op.getOperand(0)) && 5367 !X86::isZeroNode(Op.getOperand(1))) { 5368 unsigned NumBits = VT.getSizeInBits(); 5369 return getVShift(true, VT, 5370 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 5371 VT, Op.getOperand(1)), 5372 NumBits/2, DAG, *this, dl); 5373 } 5374 5375 if (IsAllConstants) // Otherwise, it's better to do a constpool load. 5376 return SDValue(); 5377 5378 // Otherwise, if this is a vector with i32 or f32 elements, and the element 5379 // is a non-constant being inserted into an element other than the low one, 5380 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka 5381 // movd/movss) to move this into the low element, then shuffle it into 5382 // place. 5383 if (EVTBits == 32) { 5384 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); 5385 5386 // Turn it into a shuffle of zero and zero-extended scalar to vector. 5387 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG); 5388 SmallVector<int, 8> MaskVec; 5389 for (unsigned i = 0; i != NumElems; ++i) 5390 MaskVec.push_back(i == Idx ? 0 : 1); 5391 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]); 5392 } 5393 } 5394 5395 // Splat is obviously ok. 
Let legalizer expand it to a shuffle. 5396 if (Values.size() == 1) { 5397 if (EVTBits == 32) { 5398 // Instead of a shuffle like this: 5399 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0> 5400 // Check if it's possible to issue this instead. 5401 // shuffle (vload ptr)), undef, <1, 1, 1, 1> 5402 unsigned Idx = CountTrailingZeros_32(NonZeros); 5403 SDValue Item = Op.getOperand(Idx); 5404 if (Op.getNode()->isOnlyUserOf(Item.getNode())) 5405 return LowerAsSplatVectorLoad(Item, VT, dl, DAG); 5406 } 5407 return SDValue(); 5408 } 5409 5410 // A vector full of immediates; various special cases are already 5411 // handled, so this is best done with a single constant-pool load. 5412 if (IsAllConstants) 5413 return SDValue(); 5414 5415 // For AVX-length vectors, build the individual 128-bit pieces and use 5416 // shuffles to put them in place. 5417 if (VT.is256BitVector()) { 5418 SmallVector<SDValue, 32> V; 5419 for (unsigned i = 0; i != NumElems; ++i) 5420 V.push_back(Op.getOperand(i)); 5421 5422 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2); 5423 5424 // Build both the lower and upper subvector. 5425 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[0], NumElems/2); 5426 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT, &V[NumElems / 2], 5427 NumElems/2); 5428 5429 // Recreate the wider vector with the lower and upper part. 5430 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl); 5431 } 5432 5433 // Let legalizer expand 2-wide build_vectors. 5434 if (EVTBits == 64) { 5435 if (NumNonZero == 1) { 5436 // One half is zero or undef. 5437 unsigned Idx = CountTrailingZeros_32(NonZeros); 5438 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, 5439 Op.getOperand(Idx)); 5440 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG); 5441 } 5442 return SDValue(); 5443 } 5444 5445 // If element VT is < 32 bits, convert it to inserts into a zero vector. 5446 if (EVTBits == 8 && NumElems == 16) { 5447 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, 5448 Subtarget, *this); 5449 if (V.getNode()) return V; 5450 } 5451 5452 if (EVTBits == 16 && NumElems == 8) { 5453 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, 5454 Subtarget, *this); 5455 if (V.getNode()) return V; 5456 } 5457 5458 // If element VT is == 32 bits, turn it into a number of shuffles. 5459 SmallVector<SDValue, 8> V(NumElems); 5460 if (NumElems == 4 && NumZero > 0) { 5461 for (unsigned i = 0; i < 4; ++i) { 5462 bool isZero = !(NonZeros & (1 << i)); 5463 if (isZero) 5464 V[i] = getZeroVector(VT, Subtarget, DAG, dl); 5465 else 5466 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 5467 } 5468 5469 for (unsigned i = 0; i < 2; ++i) { 5470 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) { 5471 default: break; 5472 case 0: 5473 V[i] = V[i*2]; // Must be a zero vector. 5474 break; 5475 case 1: 5476 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]); 5477 break; 5478 case 2: 5479 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]); 5480 break; 5481 case 3: 5482 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]); 5483 break; 5484 } 5485 } 5486 5487 bool Reverse1 = (NonZeros & 0x3) == 2; 5488 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2; 5489 int MaskVec[] = { 5490 Reverse1 ? 1 : 0, 5491 Reverse1 ? 0 : 1, 5492 static_cast<int>(Reverse2 ? NumElems+1 : NumElems), 5493 static_cast<int>(Reverse2 ? 
NumElems : NumElems+1) 5494 }; 5495 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]); 5496 } 5497 5498 if (Values.size() > 1 && VT.is128BitVector()) { 5499 // Check for a build vector of consecutive loads. 5500 for (unsigned i = 0; i < NumElems; ++i) 5501 V[i] = Op.getOperand(i); 5502 5503 // Check for elements which are consecutive loads. 5504 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG); 5505 if (LD.getNode()) 5506 return LD; 5507 5508 // For SSE 4.1, use insertps to put the high elements into the low element. 5509 if (getSubtarget()->hasSSE41()) { 5510 SDValue Result; 5511 if (Op.getOperand(0).getOpcode() != ISD::UNDEF) 5512 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0)); 5513 else 5514 Result = DAG.getUNDEF(VT); 5515 5516 for (unsigned i = 1; i < NumElems; ++i) { 5517 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue; 5518 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result, 5519 Op.getOperand(i), DAG.getIntPtrConstant(i)); 5520 } 5521 return Result; 5522 } 5523 5524 // Otherwise, expand into a number of unpckl*, start by extending each of 5525 // our (non-undef) elements to the full vector width with the element in the 5526 // bottom slot of the vector (which generates no code for SSE). 5527 for (unsigned i = 0; i < NumElems; ++i) { 5528 if (Op.getOperand(i).getOpcode() != ISD::UNDEF) 5529 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); 5530 else 5531 V[i] = DAG.getUNDEF(VT); 5532 } 5533 5534 // Next, we iteratively mix elements, e.g. for v4f32: 5535 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> 5536 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> 5537 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> 5538 unsigned EltStride = NumElems >> 1; 5539 while (EltStride != 0) { 5540 for (unsigned i = 0; i < EltStride; ++i) { 5541 // If V[i+EltStride] is undef and this is the first round of mixing, 5542 // then it is safe to just drop this shuffle: V[i] is already in the 5543 // right place, the one element (since it's the first round) being 5544 // inserted as undef can be dropped. This isn't safe for successive 5545 // rounds because they will permute elements within both vectors. 5546 if (V[i+EltStride].getOpcode() == ISD::UNDEF && 5547 EltStride == NumElems/2) 5548 continue; 5549 5550 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]); 5551 } 5552 EltStride >>= 1; 5553 } 5554 return V[0]; 5555 } 5556 return SDValue(); 5557 } 5558 5559 // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction 5560 // to create 256-bit vectors from two other 128-bit ones. 5561 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 5562 DebugLoc dl = Op.getDebugLoc(); 5563 EVT ResVT = Op.getValueType(); 5564 5565 assert(ResVT.is256BitVector() && "Value type must be 256-bit wide"); 5566 5567 SDValue V1 = Op.getOperand(0); 5568 SDValue V2 = Op.getOperand(1); 5569 unsigned NumElems = ResVT.getVectorNumElements(); 5570 5571 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl); 5572 } 5573 5574 SDValue 5575 X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const { 5576 assert(Op.getNumOperands() == 2); 5577 5578 // 256-bit AVX can use the vinsertf128 instruction to create 256-bit vectors 5579 // from two other 128-bit ones. 5580 return LowerAVXCONCAT_VECTORS(Op, DAG); 5581 } 5582 5583 // Try to lower a shuffle node into a simple blend instruction. 
5584 static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp, 5585 const X86Subtarget *Subtarget, 5586 SelectionDAG &DAG) { 5587 SDValue V1 = SVOp->getOperand(0); 5588 SDValue V2 = SVOp->getOperand(1); 5589 DebugLoc dl = SVOp->getDebugLoc(); 5590 MVT VT = SVOp->getValueType(0).getSimpleVT(); 5591 unsigned NumElems = VT.getVectorNumElements(); 5592 5593 if (!Subtarget->hasSSE41()) 5594 return SDValue(); 5595 5596 unsigned ISDNo = 0; 5597 MVT OpTy; 5598 5599 switch (VT.SimpleTy) { 5600 default: return SDValue(); 5601 case MVT::v8i16: 5602 ISDNo = X86ISD::BLENDPW; 5603 OpTy = MVT::v8i16; 5604 break; 5605 case MVT::v4i32: 5606 case MVT::v4f32: 5607 ISDNo = X86ISD::BLENDPS; 5608 OpTy = MVT::v4f32; 5609 break; 5610 case MVT::v2i64: 5611 case MVT::v2f64: 5612 ISDNo = X86ISD::BLENDPD; 5613 OpTy = MVT::v2f64; 5614 break; 5615 case MVT::v8i32: 5616 case MVT::v8f32: 5617 if (!Subtarget->hasAVX()) 5618 return SDValue(); 5619 ISDNo = X86ISD::BLENDPS; 5620 OpTy = MVT::v8f32; 5621 break; 5622 case MVT::v4i64: 5623 case MVT::v4f64: 5624 if (!Subtarget->hasAVX()) 5625 return SDValue(); 5626 ISDNo = X86ISD::BLENDPD; 5627 OpTy = MVT::v4f64; 5628 break; 5629 } 5630 assert(ISDNo && "Invalid Op Number"); 5631 5632 unsigned MaskVals = 0; 5633 5634 for (unsigned i = 0; i != NumElems; ++i) { 5635 int EltIdx = SVOp->getMaskElt(i); 5636 if (EltIdx == (int)i || EltIdx < 0) 5637 MaskVals |= (1<<i); 5638 else if (EltIdx == (int)(i + NumElems)) 5639 continue; // Bit is set to zero; 5640 else 5641 return SDValue(); 5642 } 5643 5644 V1 = DAG.getNode(ISD::BITCAST, dl, OpTy, V1); 5645 V2 = DAG.getNode(ISD::BITCAST, dl, OpTy, V2); 5646 SDValue Ret = DAG.getNode(ISDNo, dl, OpTy, V1, V2, 5647 DAG.getConstant(MaskVals, MVT::i32)); 5648 return DAG.getNode(ISD::BITCAST, dl, VT, Ret); 5649 } 5650 5651 // v8i16 shuffles - Prefer shuffles in the following order: 5652 // 1. [all] pshuflw, pshufhw, optional move 5653 // 2. [ssse3] 1 x pshufb 5654 // 3. [ssse3] 2 x pshufb + 1 x por 5655 // 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw) 5656 SDValue 5657 X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op, 5658 SelectionDAG &DAG) const { 5659 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 5660 SDValue V1 = SVOp->getOperand(0); 5661 SDValue V2 = SVOp->getOperand(1); 5662 DebugLoc dl = SVOp->getDebugLoc(); 5663 SmallVector<int, 8> MaskVals; 5664 5665 // Determine if more than 1 of the words in each of the low and high quadwords 5666 // of the result come from the same quadword of one of the two inputs. Undef 5667 // mask values count as coming from any quadword, for better codegen. 5668 unsigned LoQuad[] = { 0, 0, 0, 0 }; 5669 unsigned HiQuad[] = { 0, 0, 0, 0 }; 5670 std::bitset<4> InputQuads; 5671 for (unsigned i = 0; i < 8; ++i) { 5672 unsigned *Quad = i < 4 ? 
LoQuad : HiQuad; 5673 int EltIdx = SVOp->getMaskElt(i); 5674 MaskVals.push_back(EltIdx); 5675 if (EltIdx < 0) { 5676 ++Quad[0]; 5677 ++Quad[1]; 5678 ++Quad[2]; 5679 ++Quad[3]; 5680 continue; 5681 } 5682 ++Quad[EltIdx / 4]; 5683 InputQuads.set(EltIdx / 4); 5684 } 5685 5686 int BestLoQuad = -1; 5687 unsigned MaxQuad = 1; 5688 for (unsigned i = 0; i < 4; ++i) { 5689 if (LoQuad[i] > MaxQuad) { 5690 BestLoQuad = i; 5691 MaxQuad = LoQuad[i]; 5692 } 5693 } 5694 5695 int BestHiQuad = -1; 5696 MaxQuad = 1; 5697 for (unsigned i = 0; i < 4; ++i) { 5698 if (HiQuad[i] > MaxQuad) { 5699 BestHiQuad = i; 5700 MaxQuad = HiQuad[i]; 5701 } 5702 } 5703 5704 // For SSSE3, If all 8 words of the result come from only 1 quadword of each 5705 // of the two input vectors, shuffle them into one input vector so only a 5706 // single pshufb instruction is necessary. If There are more than 2 input 5707 // quads, disable the next transformation since it does not help SSSE3. 5708 bool V1Used = InputQuads[0] || InputQuads[1]; 5709 bool V2Used = InputQuads[2] || InputQuads[3]; 5710 if (Subtarget->hasSSSE3()) { 5711 if (InputQuads.count() == 2 && V1Used && V2Used) { 5712 BestLoQuad = InputQuads[0] ? 0 : 1; 5713 BestHiQuad = InputQuads[2] ? 2 : 3; 5714 } 5715 if (InputQuads.count() > 2) { 5716 BestLoQuad = -1; 5717 BestHiQuad = -1; 5718 } 5719 } 5720 5721 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update 5722 // the shuffle mask. If a quad is scored as -1, that means that it contains 5723 // words from all 4 input quadwords. 5724 SDValue NewV; 5725 if (BestLoQuad >= 0 || BestHiQuad >= 0) { 5726 int MaskV[] = { 5727 BestLoQuad < 0 ? 0 : BestLoQuad, 5728 BestHiQuad < 0 ? 1 : BestHiQuad 5729 }; 5730 NewV = DAG.getVectorShuffle(MVT::v2i64, dl, 5731 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1), 5732 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]); 5733 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV); 5734 5735 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the 5736 // source words for the shuffle, to aid later transformations. 5737 bool AllWordsInNewV = true; 5738 bool InOrder[2] = { true, true }; 5739 for (unsigned i = 0; i != 8; ++i) { 5740 int idx = MaskVals[i]; 5741 if (idx != (int)i) 5742 InOrder[i/4] = false; 5743 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad) 5744 continue; 5745 AllWordsInNewV = false; 5746 break; 5747 } 5748 5749 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV; 5750 if (AllWordsInNewV) { 5751 for (int i = 0; i != 8; ++i) { 5752 int idx = MaskVals[i]; 5753 if (idx < 0) 5754 continue; 5755 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4; 5756 if ((idx != i) && idx < 4) 5757 pshufhw = false; 5758 if ((idx != i) && idx > 3) 5759 pshuflw = false; 5760 } 5761 V1 = NewV; 5762 V2Used = false; 5763 BestLoQuad = 0; 5764 BestHiQuad = 1; 5765 } 5766 5767 // If we've eliminated the use of V2, and the new mask is a pshuflw or 5768 // pshufhw, that's as cheap as it gets. Return the new shuffle. 5769 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) { 5770 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW; 5771 unsigned TargetMask = 0; 5772 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, 5773 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]); 5774 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode()); 5775 TargetMask = pshufhw ? 
getShufflePSHUFHWImmediate(SVOp): 5776 getShufflePSHUFLWImmediate(SVOp); 5777 V1 = NewV.getOperand(0); 5778 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG); 5779 } 5780 } 5781 5782 // If we have SSSE3, and all words of the result are from 1 input vector, 5783 // case 2 is generated, otherwise case 3 is generated. If no SSSE3 5784 // is present, fall back to case 4. 5785 if (Subtarget->hasSSSE3()) { 5786 SmallVector<SDValue,16> pshufbMask; 5787 5788 // If we have elements from both input vectors, set the high bit of the 5789 // shuffle mask element to zero out elements that come from V2 in the V1 5790 // mask, and elements that come from V1 in the V2 mask, so that the two 5791 // results can be OR'd together. 5792 bool TwoInputs = V1Used && V2Used; 5793 for (unsigned i = 0; i != 8; ++i) { 5794 int EltIdx = MaskVals[i] * 2; 5795 int Idx0 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx; 5796 int Idx1 = (TwoInputs && (EltIdx >= 16)) ? 0x80 : EltIdx+1; 5797 pshufbMask.push_back(DAG.getConstant(Idx0, MVT::i8)); 5798 pshufbMask.push_back(DAG.getConstant(Idx1, MVT::i8)); 5799 } 5800 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1); 5801 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 5802 DAG.getNode(ISD::BUILD_VECTOR, dl, 5803 MVT::v16i8, &pshufbMask[0], 16)); 5804 if (!TwoInputs) 5805 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 5806 5807 // Calculate the shuffle mask for the second input, shuffle it, and 5808 // OR it with the first shuffled input. 5809 pshufbMask.clear(); 5810 for (unsigned i = 0; i != 8; ++i) { 5811 int EltIdx = MaskVals[i] * 2; 5812 int Idx0 = (EltIdx < 16) ? 0x80 : EltIdx - 16; 5813 int Idx1 = (EltIdx < 16) ? 0x80 : EltIdx - 15;