1 //===-- TargetLowering.cpp - Implement the TargetLowering class -----------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This implements the TargetLowering class. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Target/TargetLowering.h" 15 #include "llvm/MC/MCAsmInfo.h" 16 #include "llvm/MC/MCExpr.h" 17 #include "llvm/Target/TargetData.h" 18 #include "llvm/Target/TargetLoweringObjectFile.h" 19 #include "llvm/Target/TargetMachine.h" 20 #include "llvm/Target/TargetRegisterInfo.h" 21 #include "llvm/GlobalVariable.h" 22 #include "llvm/DerivedTypes.h" 23 #include "llvm/CodeGen/Analysis.h" 24 #include "llvm/CodeGen/MachineFrameInfo.h" 25 #include "llvm/CodeGen/MachineJumpTableInfo.h" 26 #include "llvm/CodeGen/MachineFunction.h" 27 #include "llvm/CodeGen/SelectionDAG.h" 28 #include "llvm/ADT/STLExtras.h" 29 #include "llvm/Support/CommandLine.h" 30 #include "llvm/Support/ErrorHandling.h" 31 #include "llvm/Support/MathExtras.h" 32 #include <cctype> 33 using namespace llvm; 34 35 /// We are in the process of implementing a new TypeLegalization action 36 /// - the promotion of vector elements. This feature is disabled by default 37 /// and only enabled using this flag. 38 static cl::opt<bool> 39 AllowPromoteIntElem("promote-elements", cl::Hidden, 40 cl::desc("Allow promotion of integer vector element types")); 41 42 namespace llvm { 43 TLSModel::Model getTLSModel(const GlobalValue *GV, Reloc::Model reloc) { 44 bool isLocal = GV->hasLocalLinkage(); 45 bool isDeclaration = GV->isDeclaration(); 46 // FIXME: what should we do for protected and internal visibility? 47 // For variables, is internal different from hidden? 48 bool isHidden = GV->hasHiddenVisibility(); 49 50 if (reloc == Reloc::PIC_) { 51 if (isLocal || isHidden) 52 return TLSModel::LocalDynamic; 53 else 54 return TLSModel::GeneralDynamic; 55 } else { 56 if (!isDeclaration || isHidden) 57 return TLSModel::LocalExec; 58 else 59 return TLSModel::InitialExec; 60 } 61 } 62 } 63 64 /// InitLibcallNames - Set default libcall names. 65 /// 66 static void InitLibcallNames(const char **Names) { 67 Names[RTLIB::SHL_I16] = "__ashlhi3"; 68 Names[RTLIB::SHL_I32] = "__ashlsi3"; 69 Names[RTLIB::SHL_I64] = "__ashldi3"; 70 Names[RTLIB::SHL_I128] = "__ashlti3"; 71 Names[RTLIB::SRL_I16] = "__lshrhi3"; 72 Names[RTLIB::SRL_I32] = "__lshrsi3"; 73 Names[RTLIB::SRL_I64] = "__lshrdi3"; 74 Names[RTLIB::SRL_I128] = "__lshrti3"; 75 Names[RTLIB::SRA_I16] = "__ashrhi3"; 76 Names[RTLIB::SRA_I32] = "__ashrsi3"; 77 Names[RTLIB::SRA_I64] = "__ashrdi3"; 78 Names[RTLIB::SRA_I128] = "__ashrti3"; 79 Names[RTLIB::MUL_I8] = "__mulqi3"; 80 Names[RTLIB::MUL_I16] = "__mulhi3"; 81 Names[RTLIB::MUL_I32] = "__mulsi3"; 82 Names[RTLIB::MUL_I64] = "__muldi3"; 83 Names[RTLIB::MUL_I128] = "__multi3"; 84 Names[RTLIB::MULO_I32] = "__mulosi4"; 85 Names[RTLIB::MULO_I64] = "__mulodi4"; 86 Names[RTLIB::MULO_I128] = "__muloti4"; 87 Names[RTLIB::SDIV_I8] = "__divqi3"; 88 Names[RTLIB::SDIV_I16] = "__divhi3"; 89 Names[RTLIB::SDIV_I32] = "__divsi3"; 90 Names[RTLIB::SDIV_I64] = "__divdi3"; 91 Names[RTLIB::SDIV_I128] = "__divti3"; 92 Names[RTLIB::UDIV_I8] = "__udivqi3"; 93 Names[RTLIB::UDIV_I16] = "__udivhi3"; 94 Names[RTLIB::UDIV_I32] = "__udivsi3"; 95 Names[RTLIB::UDIV_I64] = "__udivdi3"; 96 Names[RTLIB::UDIV_I128] = "__udivti3"; 97 Names[RTLIB::SREM_I8] = "__modqi3"; 98 Names[RTLIB::SREM_I16] = "__modhi3"; 99 Names[RTLIB::SREM_I32] = "__modsi3"; 100 Names[RTLIB::SREM_I64] = "__moddi3"; 101 Names[RTLIB::SREM_I128] = "__modti3"; 102 Names[RTLIB::UREM_I8] = "__umodqi3"; 103 Names[RTLIB::UREM_I16] = "__umodhi3"; 104 Names[RTLIB::UREM_I32] = "__umodsi3"; 105 Names[RTLIB::UREM_I64] = "__umoddi3"; 106 Names[RTLIB::UREM_I128] = "__umodti3"; 107 108 // These are generally not available. 109 Names[RTLIB::SDIVREM_I8] = 0; 110 Names[RTLIB::SDIVREM_I16] = 0; 111 Names[RTLIB::SDIVREM_I32] = 0; 112 Names[RTLIB::SDIVREM_I64] = 0; 113 Names[RTLIB::SDIVREM_I128] = 0; 114 Names[RTLIB::UDIVREM_I8] = 0; 115 Names[RTLIB::UDIVREM_I16] = 0; 116 Names[RTLIB::UDIVREM_I32] = 0; 117 Names[RTLIB::UDIVREM_I64] = 0; 118 Names[RTLIB::UDIVREM_I128] = 0; 119 120 Names[RTLIB::NEG_I32] = "__negsi2"; 121 Names[RTLIB::NEG_I64] = "__negdi2"; 122 Names[RTLIB::ADD_F32] = "__addsf3"; 123 Names[RTLIB::ADD_F64] = "__adddf3"; 124 Names[RTLIB::ADD_F80] = "__addxf3"; 125 Names[RTLIB::ADD_PPCF128] = "__gcc_qadd"; 126 Names[RTLIB::SUB_F32] = "__subsf3"; 127 Names[RTLIB::SUB_F64] = "__subdf3"; 128 Names[RTLIB::SUB_F80] = "__subxf3"; 129 Names[RTLIB::SUB_PPCF128] = "__gcc_qsub"; 130 Names[RTLIB::MUL_F32] = "__mulsf3"; 131 Names[RTLIB::MUL_F64] = "__muldf3"; 132 Names[RTLIB::MUL_F80] = "__mulxf3"; 133 Names[RTLIB::MUL_PPCF128] = "__gcc_qmul"; 134 Names[RTLIB::DIV_F32] = "__divsf3"; 135 Names[RTLIB::DIV_F64] = "__divdf3"; 136 Names[RTLIB::DIV_F80] = "__divxf3"; 137 Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv"; 138 Names[RTLIB::REM_F32] = "fmodf"; 139 Names[RTLIB::REM_F64] = "fmod"; 140 Names[RTLIB::REM_F80] = "fmodl"; 141 Names[RTLIB::REM_PPCF128] = "fmodl"; 142 Names[RTLIB::FMA_F32] = "fmaf"; 143 Names[RTLIB::FMA_F64] = "fma"; 144 Names[RTLIB::FMA_F80] = "fmal"; 145 Names[RTLIB::FMA_PPCF128] = "fmal"; 146 Names[RTLIB::POWI_F32] = "__powisf2"; 147 Names[RTLIB::POWI_F64] = "__powidf2"; 148 Names[RTLIB::POWI_F80] = "__powixf2"; 149 Names[RTLIB::POWI_PPCF128] = "__powitf2"; 150 Names[RTLIB::SQRT_F32] = "sqrtf"; 151 Names[RTLIB::SQRT_F64] = "sqrt"; 152 Names[RTLIB::SQRT_F80] = "sqrtl"; 153 Names[RTLIB::SQRT_PPCF128] = "sqrtl"; 154 Names[RTLIB::LOG_F32] = "logf"; 155 Names[RTLIB::LOG_F64] = "log"; 156 Names[RTLIB::LOG_F80] = "logl"; 157 Names[RTLIB::LOG_PPCF128] = "logl"; 158 Names[RTLIB::LOG2_F32] = "log2f"; 159 Names[RTLIB::LOG2_F64] = "log2"; 160 Names[RTLIB::LOG2_F80] = "log2l"; 161 Names[RTLIB::LOG2_PPCF128] = "log2l"; 162 Names[RTLIB::LOG10_F32] = "log10f"; 163 Names[RTLIB::LOG10_F64] = "log10"; 164 Names[RTLIB::LOG10_F80] = "log10l"; 165 Names[RTLIB::LOG10_PPCF128] = "log10l"; 166 Names[RTLIB::EXP_F32] = "expf"; 167 Names[RTLIB::EXP_F64] = "exp"; 168 Names[RTLIB::EXP_F80] = "expl"; 169 Names[RTLIB::EXP_PPCF128] = "expl"; 170 Names[RTLIB::EXP2_F32] = "exp2f"; 171 Names[RTLIB::EXP2_F64] = "exp2"; 172 Names[RTLIB::EXP2_F80] = "exp2l"; 173 Names[RTLIB::EXP2_PPCF128] = "exp2l"; 174 Names[RTLIB::SIN_F32] = "sinf"; 175 Names[RTLIB::SIN_F64] = "sin"; 176 Names[RTLIB::SIN_F80] = "sinl"; 177 Names[RTLIB::SIN_PPCF128] = "sinl"; 178 Names[RTLIB::COS_F32] = "cosf"; 179 Names[RTLIB::COS_F64] = "cos"; 180 Names[RTLIB::COS_F80] = "cosl"; 181 Names[RTLIB::COS_PPCF128] = "cosl"; 182 Names[RTLIB::POW_F32] = "powf"; 183 Names[RTLIB::POW_F64] = "pow"; 184 Names[RTLIB::POW_F80] = "powl"; 185 Names[RTLIB::POW_PPCF128] = "powl"; 186 Names[RTLIB::CEIL_F32] = "ceilf"; 187 Names[RTLIB::CEIL_F64] = "ceil"; 188 Names[RTLIB::CEIL_F80] = "ceill"; 189 Names[RTLIB::CEIL_PPCF128] = "ceill"; 190 Names[RTLIB::TRUNC_F32] = "truncf"; 191 Names[RTLIB::TRUNC_F64] = "trunc"; 192 Names[RTLIB::TRUNC_F80] = "truncl"; 193 Names[RTLIB::TRUNC_PPCF128] = "truncl"; 194 Names[RTLIB::RINT_F32] = "rintf"; 195 Names[RTLIB::RINT_F64] = "rint"; 196 Names[RTLIB::RINT_F80] = "rintl"; 197 Names[RTLIB::RINT_PPCF128] = "rintl"; 198 Names[RTLIB::NEARBYINT_F32] = "nearbyintf"; 199 Names[RTLIB::NEARBYINT_F64] = "nearbyint"; 200 Names[RTLIB::NEARBYINT_F80] = "nearbyintl"; 201 Names[RTLIB::NEARBYINT_PPCF128] = "nearbyintl"; 202 Names[RTLIB::FLOOR_F32] = "floorf"; 203 Names[RTLIB::FLOOR_F64] = "floor"; 204 Names[RTLIB::FLOOR_F80] = "floorl"; 205 Names[RTLIB::FLOOR_PPCF128] = "floorl"; 206 Names[RTLIB::COPYSIGN_F32] = "copysignf"; 207 Names[RTLIB::COPYSIGN_F64] = "copysign"; 208 Names[RTLIB::COPYSIGN_F80] = "copysignl"; 209 Names[RTLIB::COPYSIGN_PPCF128] = "copysignl"; 210 Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2"; 211 Names[RTLIB::FPEXT_F16_F32] = "__gnu_h2f_ieee"; 212 Names[RTLIB::FPROUND_F32_F16] = "__gnu_f2h_ieee"; 213 Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2"; 214 Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2"; 215 Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2"; 216 Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2"; 217 Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2"; 218 Names[RTLIB::FPTOSINT_F32_I8] = "__fixsfqi"; 219 Names[RTLIB::FPTOSINT_F32_I16] = "__fixsfhi"; 220 Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi"; 221 Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi"; 222 Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti"; 223 Names[RTLIB::FPTOSINT_F64_I8] = "__fixdfqi"; 224 Names[RTLIB::FPTOSINT_F64_I16] = "__fixdfhi"; 225 Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi"; 226 Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi"; 227 Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti"; 228 Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi"; 229 Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi"; 230 Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti"; 231 Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi"; 232 Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi"; 233 Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti"; 234 Names[RTLIB::FPTOUINT_F32_I8] = "__fixunssfqi"; 235 Names[RTLIB::FPTOUINT_F32_I16] = "__fixunssfhi"; 236 Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi"; 237 Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi"; 238 Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti"; 239 Names[RTLIB::FPTOUINT_F64_I8] = "__fixunsdfqi"; 240 Names[RTLIB::FPTOUINT_F64_I16] = "__fixunsdfhi"; 241 Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi"; 242 Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi"; 243 Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti"; 244 Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi"; 245 Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi"; 246 Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti"; 247 Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi"; 248 Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi"; 249 Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti"; 250 Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf"; 251 Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf"; 252 Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf"; 253 Names[RTLIB::SINTTOFP_I32_PPCF128] = "__floatsitf"; 254 Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf"; 255 Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf"; 256 Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf"; 257 Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf"; 258 Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf"; 259 Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf"; 260 Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf"; 261 Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf"; 262 Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf"; 263 Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf"; 264 Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf"; 265 Names[RTLIB::UINTTOFP_I32_PPCF128] = "__floatunsitf"; 266 Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf"; 267 Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf"; 268 Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf"; 269 Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf"; 270 Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf"; 271 Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf"; 272 Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf"; 273 Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf"; 274 Names[RTLIB::OEQ_F32] = "__eqsf2"; 275 Names[RTLIB::OEQ_F64] = "__eqdf2"; 276 Names[RTLIB::UNE_F32] = "__nesf2"; 277 Names[RTLIB::UNE_F64] = "__nedf2"; 278 Names[RTLIB::OGE_F32] = "__gesf2"; 279 Names[RTLIB::OGE_F64] = "__gedf2"; 280 Names[RTLIB::OLT_F32] = "__ltsf2"; 281 Names[RTLIB::OLT_F64] = "__ltdf2"; 282 Names[RTLIB::OLE_F32] = "__lesf2"; 283 Names[RTLIB::OLE_F64] = "__ledf2"; 284 Names[RTLIB::OGT_F32] = "__gtsf2"; 285 Names[RTLIB::OGT_F64] = "__gtdf2"; 286 Names[RTLIB::UO_F32] = "__unordsf2"; 287 Names[RTLIB::UO_F64] = "__unorddf2"; 288 Names[RTLIB::O_F32] = "__unordsf2"; 289 Names[RTLIB::O_F64] = "__unorddf2"; 290 Names[RTLIB::MEMCPY] = "memcpy"; 291 Names[RTLIB::MEMMOVE] = "memmove"; 292 Names[RTLIB::MEMSET] = "memset"; 293 Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume"; 294 Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = "__sync_val_compare_and_swap_1"; 295 Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = "__sync_val_compare_and_swap_2"; 296 Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4] = "__sync_val_compare_and_swap_4"; 297 Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8] = "__sync_val_compare_and_swap_8"; 298 Names[RTLIB::SYNC_LOCK_TEST_AND_SET_1] = "__sync_lock_test_and_set_1"; 299 Names[RTLIB::SYNC_LOCK_TEST_AND_SET_2] = "__sync_lock_test_and_set_2"; 300 Names[RTLIB::SYNC_LOCK_TEST_AND_SET_4] = "__sync_lock_test_and_set_4"; 301 Names[RTLIB::SYNC_LOCK_TEST_AND_SET_8] = "__sync_lock_test_and_set_8"; 302 Names[RTLIB::SYNC_FETCH_AND_ADD_1] = "__sync_fetch_and_add_1"; 303 Names[RTLIB::SYNC_FETCH_AND_ADD_2] = "__sync_fetch_and_add_2"; 304 Names[RTLIB::SYNC_FETCH_AND_ADD_4] = "__sync_fetch_and_add_4"; 305 Names[RTLIB::SYNC_FETCH_AND_ADD_8] = "__sync_fetch_and_add_8"; 306 Names[RTLIB::SYNC_FETCH_AND_SUB_1] = "__sync_fetch_and_sub_1"; 307 Names[RTLIB::SYNC_FETCH_AND_SUB_2] = "__sync_fetch_and_sub_2"; 308 Names[RTLIB::SYNC_FETCH_AND_SUB_4] = "__sync_fetch_and_sub_4"; 309 Names[RTLIB::SYNC_FETCH_AND_SUB_8] = "__sync_fetch_and_sub_8"; 310 Names[RTLIB::SYNC_FETCH_AND_AND_1] = "__sync_fetch_and_and_1"; 311 Names[RTLIB::SYNC_FETCH_AND_AND_2] = "__sync_fetch_and_and_2"; 312 Names[RTLIB::SYNC_FETCH_AND_AND_4] = "__sync_fetch_and_and_4"; 313 Names[RTLIB::SYNC_FETCH_AND_AND_8] = "__sync_fetch_and_and_8"; 314 Names[RTLIB::SYNC_FETCH_AND_OR_1] = "__sync_fetch_and_or_1"; 315 Names[RTLIB::SYNC_FETCH_AND_OR_2] = "__sync_fetch_and_or_2"; 316 Names[RTLIB::SYNC_FETCH_AND_OR_4] = "__sync_fetch_and_or_4"; 317 Names[RTLIB::SYNC_FETCH_AND_OR_8] = "__sync_fetch_and_or_8"; 318 Names[RTLIB::SYNC_FETCH_AND_XOR_1] = "__sync_fetch_and_xor_1"; 319 Names[RTLIB::SYNC_FETCH_AND_XOR_2] = "__sync_fetch_and_xor_2"; 320 Names[RTLIB::SYNC_FETCH_AND_XOR_4] = "__sync_fetch_and-xor_4"; 321 Names[RTLIB::SYNC_FETCH_AND_XOR_8] = "__sync_fetch_and_xor_8"; 322 Names[RTLIB::SYNC_FETCH_AND_NAND_1] = "__sync_fetch_and_nand_1"; 323 Names[RTLIB::SYNC_FETCH_AND_NAND_2] = "__sync_fetch_and_nand_2"; 324 Names[RTLIB::SYNC_FETCH_AND_NAND_4] = "__sync_fetch_and_nand_4"; 325 Names[RTLIB::SYNC_FETCH_AND_NAND_8] = "__sync_fetch_and_nand_8"; 326 } 327 328 /// InitLibcallCallingConvs - Set default libcall CallingConvs. 329 /// 330 static void InitLibcallCallingConvs(CallingConv::ID *CCs) { 331 for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) { 332 CCs[i] = CallingConv::C; 333 } 334 } 335 336 /// getFPEXT - Return the FPEXT_*_* value for the given types, or 337 /// UNKNOWN_LIBCALL if there is none. 338 RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) { 339 if (OpVT == MVT::f32) { 340 if (RetVT == MVT::f64) 341 return FPEXT_F32_F64; 342 } 343 344 return UNKNOWN_LIBCALL; 345 } 346 347 /// getFPROUND - Return the FPROUND_*_* value for the given types, or 348 /// UNKNOWN_LIBCALL if there is none. 349 RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) { 350 if (RetVT == MVT::f32) { 351 if (OpVT == MVT::f64) 352 return FPROUND_F64_F32; 353 if (OpVT == MVT::f80) 354 return FPROUND_F80_F32; 355 if (OpVT == MVT::ppcf128) 356 return FPROUND_PPCF128_F32; 357 } else if (RetVT == MVT::f64) { 358 if (OpVT == MVT::f80) 359 return FPROUND_F80_F64; 360 if (OpVT == MVT::ppcf128) 361 return FPROUND_PPCF128_F64; 362 } 363 364 return UNKNOWN_LIBCALL; 365 } 366 367 /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or 368 /// UNKNOWN_LIBCALL if there is none. 369 RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) { 370 if (OpVT == MVT::f32) { 371 if (RetVT == MVT::i8) 372 return FPTOSINT_F32_I8; 373 if (RetVT == MVT::i16) 374 return FPTOSINT_F32_I16; 375 if (RetVT == MVT::i32) 376 return FPTOSINT_F32_I32; 377 if (RetVT == MVT::i64) 378 return FPTOSINT_F32_I64; 379 if (RetVT == MVT::i128) 380 return FPTOSINT_F32_I128; 381 } else if (OpVT == MVT::f64) { 382 if (RetVT == MVT::i8) 383 return FPTOSINT_F64_I8; 384 if (RetVT == MVT::i16) 385 return FPTOSINT_F64_I16; 386 if (RetVT == MVT::i32) 387 return FPTOSINT_F64_I32; 388 if (RetVT == MVT::i64) 389 return FPTOSINT_F64_I64; 390 if (RetVT == MVT::i128) 391 return FPTOSINT_F64_I128; 392 } else if (OpVT == MVT::f80) { 393 if (RetVT == MVT::i32) 394 return FPTOSINT_F80_I32; 395 if (RetVT == MVT::i64) 396 return FPTOSINT_F80_I64; 397 if (RetVT == MVT::i128) 398 return FPTOSINT_F80_I128; 399 } else if (OpVT == MVT::ppcf128) { 400 if (RetVT == MVT::i32) 401 return FPTOSINT_PPCF128_I32; 402 if (RetVT == MVT::i64) 403 return FPTOSINT_PPCF128_I64; 404 if (RetVT == MVT::i128) 405 return FPTOSINT_PPCF128_I128; 406 } 407 return UNKNOWN_LIBCALL; 408 } 409 410 /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or 411 /// UNKNOWN_LIBCALL if there is none. 412 RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) { 413 if (OpVT == MVT::f32) { 414 if (RetVT == MVT::i8) 415 return FPTOUINT_F32_I8; 416 if (RetVT == MVT::i16) 417 return FPTOUINT_F32_I16; 418 if (RetVT == MVT::i32) 419 return FPTOUINT_F32_I32; 420 if (RetVT == MVT::i64) 421 return FPTOUINT_F32_I64; 422 if (RetVT == MVT::i128) 423 return FPTOUINT_F32_I128; 424 } else if (OpVT == MVT::f64) { 425 if (RetVT == MVT::i8) 426 return FPTOUINT_F64_I8; 427 if (RetVT == MVT::i16) 428 return FPTOUINT_F64_I16; 429 if (RetVT == MVT::i32) 430 return FPTOUINT_F64_I32; 431 if (RetVT == MVT::i64) 432 return FPTOUINT_F64_I64; 433 if (RetVT == MVT::i128) 434 return FPTOUINT_F64_I128; 435 } else if (OpVT == MVT::f80) { 436 if (RetVT == MVT::i32) 437 return FPTOUINT_F80_I32; 438 if (RetVT == MVT::i64) 439 return FPTOUINT_F80_I64; 440 if (RetVT == MVT::i128) 441 return FPTOUINT_F80_I128; 442 } else if (OpVT == MVT::ppcf128) { 443 if (RetVT == MVT::i32) 444 return FPTOUINT_PPCF128_I32; 445 if (RetVT == MVT::i64) 446 return FPTOUINT_PPCF128_I64; 447 if (RetVT == MVT::i128) 448 return FPTOUINT_PPCF128_I128; 449 } 450 return UNKNOWN_LIBCALL; 451 } 452 453 /// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or 454 /// UNKNOWN_LIBCALL if there is none. 455 RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) { 456 if (OpVT == MVT::i32) { 457 if (RetVT == MVT::f32) 458 return SINTTOFP_I32_F32; 459 else if (RetVT == MVT::f64) 460 return SINTTOFP_I32_F64; 461 else if (RetVT == MVT::f80) 462 return SINTTOFP_I32_F80; 463 else if (RetVT == MVT::ppcf128) 464 return SINTTOFP_I32_PPCF128; 465 } else if (OpVT == MVT::i64) { 466 if (RetVT == MVT::f32) 467 return SINTTOFP_I64_F32; 468 else if (RetVT == MVT::f64) 469 return SINTTOFP_I64_F64; 470 else if (RetVT == MVT::f80) 471 return SINTTOFP_I64_F80; 472 else if (RetVT == MVT::ppcf128) 473 return SINTTOFP_I64_PPCF128; 474 } else if (OpVT == MVT::i128) { 475 if (RetVT == MVT::f32) 476 return SINTTOFP_I128_F32; 477 else if (RetVT == MVT::f64) 478 return SINTTOFP_I128_F64; 479 else if (RetVT == MVT::f80) 480 return SINTTOFP_I128_F80; 481 else if (RetVT == MVT::ppcf128) 482 return SINTTOFP_I128_PPCF128; 483 } 484 return UNKNOWN_LIBCALL; 485 } 486 487 /// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or 488 /// UNKNOWN_LIBCALL if there is none. 489 RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) { 490 if (OpVT == MVT::i32) { 491 if (RetVT == MVT::f32) 492 return UINTTOFP_I32_F32; 493 else if (RetVT == MVT::f64) 494 return UINTTOFP_I32_F64; 495 else if (RetVT == MVT::f80) 496 return UINTTOFP_I32_F80; 497 else if (RetVT == MVT::ppcf128) 498 return UINTTOFP_I32_PPCF128; 499 } else if (OpVT == MVT::i64) { 500 if (RetVT == MVT::f32) 501 return UINTTOFP_I64_F32; 502 else if (RetVT == MVT::f64) 503 return UINTTOFP_I64_F64; 504 else if (RetVT == MVT::f80) 505 return UINTTOFP_I64_F80; 506 else if (RetVT == MVT::ppcf128) 507 return UINTTOFP_I64_PPCF128; 508 } else if (OpVT == MVT::i128) { 509 if (RetVT == MVT::f32) 510 return UINTTOFP_I128_F32; 511 else if (RetVT == MVT::f64) 512 return UINTTOFP_I128_F64; 513 else if (RetVT == MVT::f80) 514 return UINTTOFP_I128_F80; 515 else if (RetVT == MVT::ppcf128) 516 return UINTTOFP_I128_PPCF128; 517 } 518 return UNKNOWN_LIBCALL; 519 } 520 521 /// InitCmpLibcallCCs - Set default comparison libcall CC. 522 /// 523 static void InitCmpLibcallCCs(ISD::CondCode *CCs) { 524 memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL); 525 CCs[RTLIB::OEQ_F32] = ISD::SETEQ; 526 CCs[RTLIB::OEQ_F64] = ISD::SETEQ; 527 CCs[RTLIB::UNE_F32] = ISD::SETNE; 528 CCs[RTLIB::UNE_F64] = ISD::SETNE; 529 CCs[RTLIB::OGE_F32] = ISD::SETGE; 530 CCs[RTLIB::OGE_F64] = ISD::SETGE; 531 CCs[RTLIB::OLT_F32] = ISD::SETLT; 532 CCs[RTLIB::OLT_F64] = ISD::SETLT; 533 CCs[RTLIB::OLE_F32] = ISD::SETLE; 534 CCs[RTLIB::OLE_F64] = ISD::SETLE; 535 CCs[RTLIB::OGT_F32] = ISD::SETGT; 536 CCs[RTLIB::OGT_F64] = ISD::SETGT; 537 CCs[RTLIB::UO_F32] = ISD::SETNE; 538 CCs[RTLIB::UO_F64] = ISD::SETNE; 539 CCs[RTLIB::O_F32] = ISD::SETEQ; 540 CCs[RTLIB::O_F64] = ISD::SETEQ; 541 } 542 543 /// NOTE: The constructor takes ownership of TLOF. 544 TargetLowering::TargetLowering(const TargetMachine &tm, 545 const TargetLoweringObjectFile *tlof) 546 : TM(tm), TD(TM.getTargetData()), TLOF(*tlof), 547 mayPromoteElements(AllowPromoteIntElem) { 548 // All operations default to being supported. 549 memset(OpActions, 0, sizeof(OpActions)); 550 memset(LoadExtActions, 0, sizeof(LoadExtActions)); 551 memset(TruncStoreActions, 0, sizeof(TruncStoreActions)); 552 memset(IndexedModeActions, 0, sizeof(IndexedModeActions)); 553 memset(CondCodeActions, 0, sizeof(CondCodeActions)); 554 555 // Set default actions for various operations. 556 for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) { 557 // Default all indexed load / store to expand. 558 for (unsigned IM = (unsigned)ISD::PRE_INC; 559 IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) { 560 setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand); 561 setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand); 562 } 563 564 // These operations default to expand. 565 setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand); 566 setOperationAction(ISD::CONCAT_VECTORS, (MVT::SimpleValueType)VT, Expand); 567 } 568 569 // Most targets ignore the @llvm.prefetch intrinsic. 570 setOperationAction(ISD::PREFETCH, MVT::Other, Expand); 571 572 // ConstantFP nodes default to expand. Targets can either change this to 573 // Legal, in which case all fp constants are legal, or use isFPImmLegal() 574 // to optimize expansions for certain constants. 575 setOperationAction(ISD::ConstantFP, MVT::f32, Expand); 576 setOperationAction(ISD::ConstantFP, MVT::f64, Expand); 577 setOperationAction(ISD::ConstantFP, MVT::f80, Expand); 578 579 // These library functions default to expand. 580 setOperationAction(ISD::FLOG , MVT::f64, Expand); 581 setOperationAction(ISD::FLOG2, MVT::f64, Expand); 582 setOperationAction(ISD::FLOG10,MVT::f64, Expand); 583 setOperationAction(ISD::FEXP , MVT::f64, Expand); 584 setOperationAction(ISD::FEXP2, MVT::f64, Expand); 585 setOperationAction(ISD::FLOG , MVT::f32, Expand); 586 setOperationAction(ISD::FLOG2, MVT::f32, Expand); 587 setOperationAction(ISD::FLOG10,MVT::f32, Expand); 588 setOperationAction(ISD::FEXP , MVT::f32, Expand); 589 setOperationAction(ISD::FEXP2, MVT::f32, Expand); 590 591 // Default ISD::TRAP to expand (which turns it into abort). 592 setOperationAction(ISD::TRAP, MVT::Other, Expand); 593 594 IsLittleEndian = TD->isLittleEndian(); 595 PointerTy = MVT::getIntegerVT(8*TD->getPointerSize()); 596 memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*)); 597 memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray)); 598 maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8; 599 maxStoresPerMemsetOptSize = maxStoresPerMemcpyOptSize 600 = maxStoresPerMemmoveOptSize = 4; 601 benefitFromCodePlacementOpt = false; 602 UseUnderscoreSetJmp = false; 603 UseUnderscoreLongJmp = false; 604 SelectIsExpensive = false; 605 IntDivIsCheap = false; 606 Pow2DivIsCheap = false; 607 JumpIsExpensive = false; 608 StackPointerRegisterToSaveRestore = 0; 609 ExceptionPointerRegister = 0; 610 ExceptionSelectorRegister = 0; 611 BooleanContents = UndefinedBooleanContent; 612 SchedPreferenceInfo = Sched::Latency; 613 JumpBufSize = 0; 614 JumpBufAlignment = 0; 615 MinFunctionAlignment = 0; 616 PrefFunctionAlignment = 0; 617 PrefLoopAlignment = 0; 618 MinStackArgumentAlignment = 1; 619 ShouldFoldAtomicFences = false; 620 621 InitLibcallNames(LibcallRoutineNames); 622 InitCmpLibcallCCs(CmpLibcallCCs); 623 InitLibcallCallingConvs(LibcallCallingConvs); 624 } 625 626 TargetLowering::~TargetLowering() { 627 delete &TLOF; 628 } 629 630 MVT TargetLowering::getShiftAmountTy(EVT LHSTy) const { 631 return MVT::getIntegerVT(8*TD->getPointerSize()); 632 } 633 634 /// canOpTrap - Returns true if the operation can trap for the value type. 635 /// VT must be a legal type. 636 bool TargetLowering::canOpTrap(unsigned Op, EVT VT) const { 637 assert(isTypeLegal(VT)); 638 switch (Op) { 639 default: 640 return false; 641 case ISD::FDIV: 642 case ISD::FREM: 643 case ISD::SDIV: 644 case ISD::UDIV: 645 case ISD::SREM: 646 case ISD::UREM: 647 return true; 648 } 649 } 650 651 652 static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT, 653 unsigned &NumIntermediates, 654 EVT &RegisterVT, 655 TargetLowering *TLI) { 656 // Figure out the right, legal destination reg to copy into. 657 unsigned NumElts = VT.getVectorNumElements(); 658 MVT EltTy = VT.getVectorElementType(); 659 660 unsigned NumVectorRegs = 1; 661 662 // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we 663 // could break down into LHS/RHS like LegalizeDAG does. 664 if (!isPowerOf2_32(NumElts)) { 665 NumVectorRegs = NumElts; 666 NumElts = 1; 667 } 668 669 // Divide the input until we get to a supported size. This will always 670 // end with a scalar if the target doesn't support vectors. 671 while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) { 672 NumElts >>= 1; 673 NumVectorRegs <<= 1; 674 } 675 676 NumIntermediates = NumVectorRegs; 677 678 MVT NewVT = MVT::getVectorVT(EltTy, NumElts); 679 if (!TLI->isTypeLegal(NewVT)) 680 NewVT = EltTy; 681 IntermediateVT = NewVT; 682 683 unsigned NewVTSize = NewVT.getSizeInBits(); 684 685 // Convert sizes such as i33 to i64. 686 if (!isPowerOf2_32(NewVTSize)) 687 NewVTSize = NextPowerOf2(NewVTSize); 688 689 EVT DestVT = TLI->getRegisterType(NewVT); 690 RegisterVT = DestVT; 691 if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16. 692 return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits()); 693 694 // Otherwise, promotion or legal types use the same number of registers as 695 // the vector decimated to the appropriate level. 696 return NumVectorRegs; 697 } 698 699 /// isLegalRC - Return true if the value types that can be represented by the 700 /// specified register class are all legal. 701 bool TargetLowering::isLegalRC(const TargetRegisterClass *RC) const { 702 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end(); 703 I != E; ++I) { 704 if (isTypeLegal(*I)) 705 return true; 706 } 707 return false; 708 } 709 710 /// hasLegalSuperRegRegClasses - Return true if the specified register class 711 /// has one or more super-reg register classes that are legal. 712 bool 713 TargetLowering::hasLegalSuperRegRegClasses(const TargetRegisterClass *RC) const{ 714 if (*RC->superregclasses_begin() == 0) 715 return false; 716 for (TargetRegisterInfo::regclass_iterator I = RC->superregclasses_begin(), 717 E = RC->superregclasses_end(); I != E; ++I) { 718 const TargetRegisterClass *RRC = *I; 719 if (isLegalRC(RRC)) 720 return true; 721 } 722 return false; 723 } 724 725 /// findRepresentativeClass - Return the largest legal super-reg register class 726 /// of the register class for the specified type and its associated "cost". 727 std::pair<const TargetRegisterClass*, uint8_t> 728 TargetLowering::findRepresentativeClass(EVT VT) const { 729 const TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy]; 730 if (!RC) 731 return std::make_pair(RC, 0); 732 const TargetRegisterClass *BestRC = RC; 733 for (TargetRegisterInfo::regclass_iterator I = RC->superregclasses_begin(), 734 E = RC->superregclasses_end(); I != E; ++I) { 735 const TargetRegisterClass *RRC = *I; 736 if (RRC->isASubClass() || !isLegalRC(RRC)) 737 continue; 738 if (!hasLegalSuperRegRegClasses(RRC)) 739 return std::make_pair(RRC, 1); 740 BestRC = RRC; 741 } 742 return std::make_pair(BestRC, 1); 743 } 744 745 746 /// computeRegisterProperties - Once all of the register classes are added, 747 /// this allows us to compute derived properties we expose. 748 void TargetLowering::computeRegisterProperties() { 749 assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE && 750 "Too many value types for ValueTypeActions to hold!"); 751 752 // Everything defaults to needing one register. 753 for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) { 754 NumRegistersForVT[i] = 1; 755 RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i; 756 } 757 // ...except isVoid, which doesn't need any registers. 758 NumRegistersForVT[MVT::isVoid] = 0; 759 760 // Find the largest integer register class. 761 unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE; 762 for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg) 763 assert(LargestIntReg != MVT::i1 && "No integer registers defined!"); 764 765 // Every integer value type larger than this largest register takes twice as 766 // many registers to represent as the previous ValueType. 767 for (unsigned ExpandedReg = LargestIntReg + 1; ; ++ExpandedReg) { 768 EVT ExpandedVT = (MVT::SimpleValueType)ExpandedReg; 769 if (!ExpandedVT.isInteger()) 770 break; 771 NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1]; 772 RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg; 773 TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1); 774 ValueTypeActions.setTypeAction(ExpandedVT, TypeExpandInteger); 775 } 776 777 // Inspect all of the ValueType's smaller than the largest integer 778 // register to see which ones need promotion. 779 unsigned LegalIntReg = LargestIntReg; 780 for (unsigned IntReg = LargestIntReg - 1; 781 IntReg >= (unsigned)MVT::i1; --IntReg) { 782 EVT IVT = (MVT::SimpleValueType)IntReg; 783 if (isTypeLegal(IVT)) { 784 LegalIntReg = IntReg; 785 } else { 786 RegisterTypeForVT[IntReg] = TransformToType[IntReg] = 787 (MVT::SimpleValueType)LegalIntReg; 788 ValueTypeActions.setTypeAction(IVT, TypePromoteInteger); 789 } 790 } 791 792 // ppcf128 type is really two f64's. 793 if (!isTypeLegal(MVT::ppcf128)) { 794 NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64]; 795 RegisterTypeForVT[MVT::ppcf128] = MVT::f64; 796 TransformToType[MVT::ppcf128] = MVT::f64; 797 ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat); 798 } 799 800 // Decide how to handle f64. If the target does not have native f64 support, 801 // expand it to i64 and we will be generating soft float library calls. 802 if (!isTypeLegal(MVT::f64)) { 803 NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64]; 804 RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64]; 805 TransformToType[MVT::f64] = MVT::i64; 806 ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat); 807 } 808 809 // Decide how to handle f32. If the target does not have native support for 810 // f32, promote it to f64 if it is legal. Otherwise, expand it to i32. 811 if (!isTypeLegal(MVT::f32)) { 812 if (isTypeLegal(MVT::f64)) { 813 NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::f64]; 814 RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::f64]; 815 TransformToType[MVT::f32] = MVT::f64; 816 ValueTypeActions.setTypeAction(MVT::f32, TypePromoteInteger); 817 } else { 818 NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32]; 819 RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32]; 820 TransformToType[MVT::f32] = MVT::i32; 821 ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat); 822 } 823 } 824 825 // Loop over all of the vector value types to see which need transformations. 826 for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE; 827 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) { 828 MVT VT = (MVT::SimpleValueType)i; 829 if (isTypeLegal(VT)) continue; 830 831 // Determine if there is a legal wider type. If so, we should promote to 832 // that wider vector type. 833 EVT EltVT = VT.getVectorElementType(); 834 unsigned NElts = VT.getVectorNumElements(); 835 if (NElts != 1) { 836 bool IsLegalWiderType = false; 837 // If we allow the promotion of vector elements using a flag, 838 // then return TypePromoteInteger on vector elements. 839 // First try to promote the elements of integer vectors. If no legal 840 // promotion was found, fallback to the widen-vector method. 841 if (mayPromoteElements) 842 for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) { 843 EVT SVT = (MVT::SimpleValueType)nVT; 844 // Promote vectors of integers to vectors with the same number 845 // of elements, with a wider element type. 846 if (SVT.getVectorElementType().getSizeInBits() > EltVT.getSizeInBits() 847 && SVT.getVectorNumElements() == NElts && 848 isTypeLegal(SVT) && SVT.getScalarType().isInteger()) { 849 TransformToType[i] = SVT; 850 RegisterTypeForVT[i] = SVT; 851 NumRegistersForVT[i] = 1; 852 ValueTypeActions.setTypeAction(VT, TypePromoteInteger); 853 IsLegalWiderType = true; 854 break; 855 } 856 } 857 858 if (IsLegalWiderType) continue; 859 860 // Try to widen the vector. 861 for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) { 862 EVT SVT = (MVT::SimpleValueType)nVT; 863 if (SVT.getVectorElementType() == EltVT && 864 SVT.getVectorNumElements() > NElts && 865 isTypeLegal(SVT)) { 866 TransformToType[i] = SVT; 867 RegisterTypeForVT[i] = SVT; 868 NumRegistersForVT[i] = 1; 869 ValueTypeActions.setTypeAction(VT, TypeWidenVector); 870 IsLegalWiderType = true; 871 break; 872 } 873 } 874 if (IsLegalWiderType) continue; 875 } 876 877 MVT IntermediateVT; 878 EVT RegisterVT; 879 unsigned NumIntermediates; 880 NumRegistersForVT[i] = 881 getVectorTypeBreakdownMVT(VT, IntermediateVT, NumIntermediates, 882 RegisterVT, this); 883 RegisterTypeForVT[i] = RegisterVT; 884 885 EVT NVT = VT.getPow2VectorType(); 886 if (NVT == VT) { 887 // Type is already a power of 2. The default action is to split. 888 TransformToType[i] = MVT::Other; 889 unsigned NumElts = VT.getVectorNumElements(); 890 ValueTypeActions.setTypeAction(VT, 891 NumElts > 1 ? TypeSplitVector : TypeScalarizeVector); 892 } else { 893 TransformToType[i] = NVT; 894 ValueTypeActions.setTypeAction(VT, TypeWidenVector); 895 } 896 } 897 898 // Determine the 'representative' register class for each value type. 899 // An representative register class is the largest (meaning one which is 900 // not a sub-register class / subreg register class) legal register class for 901 // a group of value types. For example, on i386, i8, i16, and i32 902 // representative would be GR32; while on x86_64 it's GR64. 903 for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) { 904 const TargetRegisterClass* RRC; 905 uint8_t Cost; 906 tie(RRC, Cost) = findRepresentativeClass((MVT::SimpleValueType)i); 907 RepRegClassForVT[i] = RRC; 908 RepRegClassCostForVT[i] = Cost; 909 } 910 } 911 912 const char *TargetLowering::getTargetNodeName(unsigned Opcode) const { 913 return NULL; 914 } 915 916 917 MVT::SimpleValueType TargetLowering::getSetCCResultType(EVT VT) const { 918 return PointerTy.SimpleTy; 919 } 920 921 MVT::SimpleValueType TargetLowering::getCmpLibcallReturnType() const { 922 return MVT::i32; // return the default value 923 } 924 925 /// getVectorTypeBreakdown - Vector types are broken down into some number of 926 /// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32 927 /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack. 928 /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86. 929 /// 930 /// This method returns the number of registers needed, and the VT for each 931 /// register. It also returns the VT and quantity of the intermediate values 932 /// before they are promoted/expanded. 933 /// 934 unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT, 935 EVT &IntermediateVT, 936 unsigned &NumIntermediates, 937 EVT &RegisterVT) const { 938 unsigned NumElts = VT.getVectorNumElements(); 939 940 // If there is a wider vector type with the same element type as this one, 941 // we should widen to that legal vector type. This handles things like 942 // <2 x float> -> <4 x float>. 943 if (NumElts != 1 && getTypeAction(Context, VT) == TypeWidenVector) { 944 RegisterVT = getTypeToTransformTo(Context, VT); 945 if (isTypeLegal(RegisterVT)) { 946 IntermediateVT = RegisterVT; 947 NumIntermediates = 1; 948 return 1; 949 } 950 } 951 952 // Figure out the right, legal destination reg to copy into. 953 EVT EltTy = VT.getVectorElementType(); 954 955 unsigned NumVectorRegs = 1; 956 957 // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we 958 // could break down into LHS/RHS like LegalizeDAG does. 959 if (!isPowerOf2_32(NumElts)) { 960 NumVectorRegs = NumElts; 961 NumElts = 1; 962 } 963 964 // Divide the input until we get to a supported size. This will always 965 // end with a scalar if the target doesn't support vectors. 966 while (NumElts > 1 && !isTypeLegal( 967 EVT::getVectorVT(Context, EltTy, NumElts))) { 968 NumElts >>= 1; 969 NumVectorRegs <<= 1; 970 } 971 972 NumIntermediates = NumVectorRegs; 973 974 EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts); 975 if (!isTypeLegal(NewVT)) 976 NewVT = EltTy; 977 IntermediateVT = NewVT; 978 979 EVT DestVT = getRegisterType(Context, NewVT); 980 RegisterVT = DestVT; 981 unsigned NewVTSize = NewVT.getSizeInBits(); 982 983 // Convert sizes such as i33 to i64. 984 if (!isPowerOf2_32(NewVTSize)) 985 NewVTSize = NextPowerOf2(NewVTSize); 986 987 if (DestVT.bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16. 988 return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits()); 989 990 // Otherwise, promotion or legal types use the same number of registers as 991 // the vector decimated to the appropriate level. 992 return NumVectorRegs; 993 } 994 995 /// Get the EVTs and ArgFlags collections that represent the legalized return 996 /// type of the given function. This does not require a DAG or a return value, 997 /// and is suitable for use before any DAGs for the function are constructed. 998 /// TODO: Move this out of TargetLowering.cpp. 999 void llvm::GetReturnInfo(Type* ReturnType, Attributes attr, 1000 SmallVectorImpl<ISD::OutputArg> &Outs, 1001 const TargetLowering &TLI, 1002 SmallVectorImpl<uint64_t> *Offsets) { 1003 SmallVector<EVT, 4> ValueVTs; 1004 ComputeValueVTs(TLI, ReturnType, ValueVTs); 1005 unsigned NumValues = ValueVTs.size(); 1006 if (NumValues == 0) return; 1007 unsigned Offset = 0; 1008 1009 for (unsigned j = 0, f = NumValues; j != f; ++j) { 1010 EVT VT = ValueVTs[j]; 1011 ISD::NodeType ExtendKind = ISD::ANY_EXTEND; 1012 1013 if (attr & Attribute::SExt) 1014 ExtendKind = ISD::SIGN_EXTEND; 1015 else if (attr & Attribute::ZExt) 1016 ExtendKind = ISD::ZERO_EXTEND; 1017 1018 // FIXME: C calling convention requires the return type to be promoted to 1019 // at least 32-bit. But this is not necessary for non-C calling 1020 // conventions. The frontend should mark functions whose return values 1021 // require promoting with signext or zeroext attributes. 1022 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) { 1023 EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32); 1024 if (VT.bitsLT(MinVT)) 1025 VT = MinVT; 1026 } 1027 1028 unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT); 1029 EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT); 1030 unsigned PartSize = TLI.getTargetData()->getTypeAllocSize( 1031 PartVT.getTypeForEVT(ReturnType->getContext())); 1032 1033 // 'inreg' on function refers to return value 1034 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy(); 1035 if (attr & Attribute::InReg) 1036 Flags.setInReg(); 1037 1038 // Propagate extension type if any 1039 if (attr & Attribute::SExt) 1040 Flags.setSExt(); 1041 else if (attr & Attribute::ZExt) 1042 Flags.setZExt(); 1043 1044 for (unsigned i = 0; i < NumParts; ++i) { 1045 Outs.push_back(ISD::OutputArg(Flags, PartVT, /*isFixed=*/true)); 1046 if (Offsets) { 1047 Offsets->push_back(Offset); 1048 Offset += PartSize; 1049 } 1050 } 1051 } 1052 } 1053 1054 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 1055 /// function arguments in the caller parameter area. This is the actual 1056 /// alignment, not its logarithm. 1057 unsigned TargetLowering::getByValTypeAlignment(Type *Ty) const { 1058 return TD->getCallFrameTypeAlignment(Ty); 1059 } 1060 1061 /// getJumpTableEncoding - Return the entry encoding for a jump table in the 1062 /// current function. The returned value is a member of the 1063 /// MachineJumpTableInfo::JTEntryKind enum. 1064 unsigned TargetLowering::getJumpTableEncoding() const { 1065 // In non-pic modes, just use the address of a block. 1066 if (getTargetMachine().getRelocationModel() != Reloc::PIC_) 1067 return MachineJumpTableInfo::EK_BlockAddress; 1068 1069 // In PIC mode, if the target supports a GPRel32 directive, use it. 1070 if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != 0) 1071 return MachineJumpTableInfo::EK_GPRel32BlockAddress; 1072 1073 // Otherwise, use a label difference. 1074 return MachineJumpTableInfo::EK_LabelDifference32; 1075 } 1076 1077 SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table, 1078 SelectionDAG &DAG) const { 1079 // If our PIC model is GP relative, use the global offset table as the base. 1080 if (getJumpTableEncoding() == MachineJumpTableInfo::EK_GPRel32BlockAddress) 1081 return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy()); 1082 return Table; 1083 } 1084 1085 /// getPICJumpTableRelocBaseExpr - This returns the relocation base for the 1086 /// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an 1087 /// MCExpr. 1088 const MCExpr * 1089 TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 1090 unsigned JTI,MCContext &Ctx) const{ 1091 // The normal PIC reloc base is the label at the start of the jump table. 1092 return MCSymbolRefExpr::Create(MF->getJTISymbol(JTI, Ctx), Ctx); 1093 } 1094 1095 bool 1096 TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 1097 // Assume that everything is safe in static mode. 1098 if (getTargetMachine().getRelocationModel() == Reloc::Static) 1099 return true; 1100 1101 // In dynamic-no-pic mode, assume that known defined values are safe. 1102 if (getTargetMachine().getRelocationModel() == Reloc::DynamicNoPIC && 1103 GA && 1104 !GA->getGlobal()->isDeclaration() && 1105 !GA->getGlobal()->isWeakForLinker()) 1106 return true; 1107 1108 // Otherwise assume nothing is safe. 1109 return false; 1110 } 1111 1112 //===----------------------------------------------------------------------===// 1113 // Optimization Methods 1114 //===----------------------------------------------------------------------===// 1115 1116 /// ShrinkDemandedConstant - Check to see if the specified operand of the 1117 /// specified instruction is a constant integer. If so, check to see if there 1118 /// are any bits set in the constant that are not demanded. If so, shrink the 1119 /// constant and return true. 1120 bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op, 1121 const APInt &Demanded) { 1122 DebugLoc dl = Op.getDebugLoc(); 1123 1124 // FIXME: ISD::SELECT, ISD::SELECT_CC 1125 switch (Op.getOpcode()) { 1126 default: break; 1127 case ISD::XOR: 1128 case ISD::AND: 1129 case ISD::OR: { 1130 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 1131 if (!C) return false; 1132 1133 if (Op.getOpcode() == ISD::XOR && 1134 (C->getAPIntValue() | (~Demanded)).isAllOnesValue()) 1135 return false; 1136 1137 // if we can expand it to have all bits set, do it 1138 if (C->getAPIntValue().intersects(~Demanded)) { 1139 EVT VT = Op.getValueType(); 1140 SDValue New = DAG.getNode(Op.getOpcode(), dl, VT, Op.getOperand(0), 1141 DAG.getConstant(Demanded & 1142 C->getAPIntValue(), 1143 VT)); 1144 return CombineTo(Op, New); 1145 } 1146 1147 break; 1148 } 1149 } 1150 1151 return false; 1152 } 1153 1154 /// ShrinkDemandedOp - Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the 1155 /// casts are free. This uses isZExtFree and ZERO_EXTEND for the widening 1156 /// cast, but it could be generalized for targets with other types of 1157 /// implicit widening casts. 1158 bool 1159 TargetLowering::TargetLoweringOpt::ShrinkDemandedOp(SDValue Op, 1160 unsigned BitWidth, 1161 const APInt &Demanded, 1162 DebugLoc dl) { 1163 assert(Op.getNumOperands() == 2 && 1164 "ShrinkDemandedOp only supports binary operators!"); 1165 assert(Op.getNode()->getNumValues() == 1 && 1166 "ShrinkDemandedOp only supports nodes with one result!"); 1167 1168 // Don't do this if the node has another user, which may require the 1169 // full value. 1170 if (!Op.getNode()->hasOneUse()) 1171 return false; 1172 1173 // Search for the smallest integer type with free casts to and from 1174 // Op's type. For expedience, just check power-of-2 integer types. 1175 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 1176 unsigned SmallVTBits = BitWidth - Demanded.countLeadingZeros(); 1177 if (!isPowerOf2_32(SmallVTBits)) 1178 SmallVTBits = NextPowerOf2(SmallVTBits); 1179 for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) { 1180 EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits); 1181 if (TLI.isTruncateFree(Op.getValueType(), SmallVT) && 1182 TLI.isZExtFree(SmallVT, Op.getValueType())) { 1183 // We found a type with free casts. 1184 SDValue X = DAG.getNode(Op.getOpcode(), dl, SmallVT, 1185 DAG.getNode(ISD::TRUNCATE, dl, SmallVT, 1186 Op.getNode()->getOperand(0)), 1187 DAG.getNode(ISD::TRUNCATE, dl, SmallVT, 1188 Op.getNode()->getOperand(1))); 1189 SDValue Z = DAG.getNode(ISD::ZERO_EXTEND, dl, Op.getValueType(), X); 1190 return CombineTo(Op, Z); 1191 } 1192 } 1193 return false; 1194 } 1195 1196 /// SimplifyDemandedBits - Look at Op. At this point, we know that only the 1197 /// DemandedMask bits of the result of Op are ever used downstream. If we can 1198 /// use this information to simplify Op, create a new simplified DAG node and 1199 /// return true, returning the original and new nodes in Old and New. Otherwise, 1200 /// analyze the expression and return a mask of KnownOne and KnownZero bits for 1201 /// the expression (used to simplify the caller). The KnownZero/One bits may 1202 /// only be accurate for those bits in the DemandedMask. 1203 bool TargetLowering::SimplifyDemandedBits(SDValue Op, 1204 const APInt &DemandedMask, 1205 APInt &KnownZero, 1206 APInt &KnownOne, 1207 TargetLoweringOpt &TLO, 1208 unsigned Depth) const { 1209 unsigned BitWidth = DemandedMask.getBitWidth(); 1210 assert(Op.getValueType().getScalarType().getSizeInBits() == BitWidth && 1211 "Mask size mismatches value type size!"); 1212 APInt NewMask = DemandedMask; 1213 DebugLoc dl = Op.getDebugLoc(); 1214 1215 // Don't know anything. 1216 KnownZero = KnownOne = APInt(BitWidth, 0); 1217 1218 // Other users may use these bits. 1219 if (!Op.getNode()->hasOneUse()) { 1220 if (Depth != 0) { 1221 // If not at the root, Just compute the KnownZero/KnownOne bits to 1222 // simplify things downstream. 1223 TLO.DAG.ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth); 1224 return false; 1225 } 1226 // If this is the root being simplified, allow it to have multiple uses, 1227 // just set the NewMask to all bits. 1228 NewMask = APInt::getAllOnesValue(BitWidth); 1229 } else if (DemandedMask == 0) { 1230 // Not demanding any bits from Op. 1231 if (Op.getOpcode() != ISD::UNDEF) 1232 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(Op.getValueType())); 1233 return false; 1234 } else if (Depth == 6) { // Limit search depth. 1235 return false; 1236 } 1237 1238 APInt KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut; 1239 switch (Op.getOpcode()) { 1240 case ISD::Constant: 1241 // We know all of the bits for a constant! 1242 KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue() & NewMask; 1243 KnownZero = ~KnownOne & NewMask; 1244 return false; // Don't fall through, will infinitely loop. 1245 case ISD::AND: 1246 // If the RHS is a constant, check to see if the LHS would be zero without 1247 // using the bits from the RHS. Below, we use knowledge about the RHS to 1248 // simplify the LHS, here we're using information from the LHS to simplify 1249 // the RHS. 1250 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1251 APInt LHSZero, LHSOne; 1252 // Do not increment Depth here; that can cause an infinite loop. 1253 TLO.DAG.ComputeMaskedBits(Op.getOperand(0), NewMask, 1254 LHSZero, LHSOne, Depth); 1255 // If the LHS already has zeros where RHSC does, this and is dead. 1256 if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask)) 1257 return TLO.CombineTo(Op, Op.getOperand(0)); 1258 // If any of the set bits in the RHS are known zero on the LHS, shrink 1259 // the constant. 1260 if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask)) 1261 return true; 1262 } 1263 1264 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero, 1265 KnownOne, TLO, Depth+1)) 1266 return true; 1267 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1268 if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask, 1269 KnownZero2, KnownOne2, TLO, Depth+1)) 1270 return true; 1271 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1272 1273 // If all of the demanded bits are known one on one side, return the other. 1274 // These bits cannot contribute to the result of the 'and'. 1275 if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask)) 1276 return TLO.CombineTo(Op, Op.getOperand(0)); 1277 if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask)) 1278 return TLO.CombineTo(Op, Op.getOperand(1)); 1279 // If all of the demanded bits in the inputs are known zeros, return zero. 1280 if ((NewMask & (KnownZero|KnownZero2)) == NewMask) 1281 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, Op.getValueType())); 1282 // If the RHS is a constant, see if we can simplify it. 1283 if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask)) 1284 return true; 1285 // If the operation can be done in a smaller type, do so. 1286 if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl)) 1287 return true; 1288 1289 // Output known-1 bits are only known if set in both the LHS & RHS. 1290 KnownOne &= KnownOne2; 1291 // Output known-0 are known to be clear if zero in either the LHS | RHS. 1292 KnownZero |= KnownZero2; 1293 break; 1294 case ISD::OR: 1295 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero, 1296 KnownOne, TLO, Depth+1)) 1297 return true; 1298 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1299 if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask, 1300 KnownZero2, KnownOne2, TLO, Depth+1)) 1301 return true; 1302 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1303 1304 // If all of the demanded bits are known zero on one side, return the other. 1305 // These bits cannot contribute to the result of the 'or'. 1306 if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask)) 1307 return TLO.CombineTo(Op, Op.getOperand(0)); 1308 if ((NewMask & ~KnownOne & KnownZero2) == (~KnownOne & NewMask)) 1309 return TLO.CombineTo(Op, Op.getOperand(1)); 1310 // If all of the potentially set bits on one side are known to be set on 1311 // the other side, just use the 'other' side. 1312 if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask)) 1313 return TLO.CombineTo(Op, Op.getOperand(0)); 1314 if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask)) 1315 return TLO.CombineTo(Op, Op.getOperand(1)); 1316 // If the RHS is a constant, see if we can simplify it. 1317 if (TLO.ShrinkDemandedConstant(Op, NewMask)) 1318 return true; 1319 // If the operation can be done in a smaller type, do so. 1320 if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl)) 1321 return true; 1322 1323 // Output known-0 bits are only known if clear in both the LHS & RHS. 1324 KnownZero &= KnownZero2; 1325 // Output known-1 are known to be set if set in either the LHS | RHS. 1326 KnownOne |= KnownOne2; 1327 break; 1328 case ISD::XOR: 1329 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero, 1330 KnownOne, TLO, Depth+1)) 1331 return true; 1332 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1333 if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2, 1334 KnownOne2, TLO, Depth+1)) 1335 return true; 1336 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1337 1338 // If all of the demanded bits are known zero on one side, return the other. 1339 // These bits cannot contribute to the result of the 'xor'. 1340 if ((KnownZero & NewMask) == NewMask) 1341 return TLO.CombineTo(Op, Op.getOperand(0)); 1342 if ((KnownZero2 & NewMask) == NewMask) 1343 return TLO.CombineTo(Op, Op.getOperand(1)); 1344 // If the operation can be done in a smaller type, do so. 1345 if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl)) 1346 return true; 1347 1348 // If all of the unknown bits are known to be zero on one side or the other 1349 // (but not both) turn this into an *inclusive* or. 1350 // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0 1351 if ((NewMask & ~KnownZero & ~KnownZero2) == 0) 1352 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, Op.getValueType(), 1353 Op.getOperand(0), 1354 Op.getOperand(1))); 1355 1356 // Output known-0 bits are known if clear or set in both the LHS & RHS. 1357 KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2); 1358 // Output known-1 are known to be set if set in only one of the LHS, RHS. 1359 KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2); 1360 1361 // If all of the demanded bits on one side are known, and all of the set 1362 // bits on that side are also known to be set on the other side, turn this 1363 // into an AND, as we know the bits will be cleared. 1364 // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2 1365 if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known 1366 if ((KnownOne & KnownOne2) == KnownOne) { 1367 EVT VT = Op.getValueType(); 1368 SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT); 1369 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, 1370 Op.getOperand(0), ANDC)); 1371 } 1372 } 1373 1374 // If the RHS is a constant, see if we can simplify it. 1375 // for XOR, we prefer to force bits to 1 if they will make a -1. 1376 // if we can't force bits, try to shrink constant 1377 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1378 APInt Expanded = C->getAPIntValue() | (~NewMask); 1379 // if we can expand it to have all bits set, do it 1380 if (Expanded.isAllOnesValue()) { 1381 if (Expanded != C->getAPIntValue()) { 1382 EVT VT = Op.getValueType(); 1383 SDValue New = TLO.DAG.getNode(Op.getOpcode(), dl,VT, Op.getOperand(0), 1384 TLO.DAG.getConstant(Expanded, VT)); 1385 return TLO.CombineTo(Op, New); 1386 } 1387 // if it already has all the bits set, nothing to change 1388 // but don't shrink either! 1389 } else if (TLO.ShrinkDemandedConstant(Op, NewMask)) { 1390 return true; 1391 } 1392 } 1393 1394 KnownZero = KnownZeroOut; 1395 KnownOne = KnownOneOut; 1396 break; 1397 case ISD::SELECT: 1398 if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero, 1399 KnownOne, TLO, Depth+1)) 1400 return true; 1401 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2, 1402 KnownOne2, TLO, Depth+1)) 1403 return true; 1404 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1405 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1406 1407 // If the operands are constants, see if we can simplify them. 1408 if (TLO.ShrinkDemandedConstant(Op, NewMask)) 1409 return true; 1410 1411 // Only known if known in both the LHS and RHS. 1412 KnownOne &= KnownOne2; 1413 KnownZero &= KnownZero2; 1414 break; 1415 case ISD::SELECT_CC: 1416 if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero, 1417 KnownOne, TLO, Depth+1)) 1418 return true; 1419 if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2, 1420 KnownOne2, TLO, Depth+1)) 1421 return true; 1422 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1423 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1424 1425 // If the operands are constants, see if we can simplify them. 1426 if (TLO.ShrinkDemandedConstant(Op, NewMask)) 1427 return true; 1428 1429 // Only known if known in both the LHS and RHS. 1430 KnownOne &= KnownOne2; 1431 KnownZero &= KnownZero2; 1432 break; 1433 case ISD::SHL: 1434 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1435 unsigned ShAmt = SA->getZExtValue(); 1436 SDValue InOp = Op.getOperand(0); 1437 1438 // If the shift count is an invalid immediate, don't do anything. 1439 if (ShAmt >= BitWidth) 1440 break; 1441 1442 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a 1443 // single shift. We can do this if the bottom bits (which are shifted 1444 // out) are never demanded. 1445 if (InOp.getOpcode() == ISD::SRL && 1446 isa<ConstantSDNode>(InOp.getOperand(1))) { 1447 if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) { 1448 unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue(); 1449 unsigned Opc = ISD::SHL; 1450 int Diff = ShAmt-C1; 1451 if (Diff < 0) { 1452 Diff = -Diff; 1453 Opc = ISD::SRL; 1454 } 1455 1456 SDValue NewSA = 1457 TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType()); 1458 EVT VT = Op.getValueType(); 1459 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, 1460 InOp.getOperand(0), NewSA)); 1461 } 1462 } 1463 1464 if (SimplifyDemandedBits(InOp, NewMask.lshr(ShAmt), 1465 KnownZero, KnownOne, TLO, Depth+1)) 1466 return true; 1467 1468 // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits 1469 // are not demanded. This will likely allow the anyext to be folded away. 1470 if (InOp.getNode()->getOpcode() == ISD::ANY_EXTEND) { 1471 SDValue InnerOp = InOp.getNode()->getOperand(0); 1472 EVT InnerVT = InnerOp.getValueType(); 1473 if ((APInt::getHighBitsSet(BitWidth, 1474 BitWidth - InnerVT.getSizeInBits()) & 1475 DemandedMask) == 0 && 1476 isTypeDesirableForOp(ISD::SHL, InnerVT)) { 1477 EVT ShTy = getShiftAmountTy(InnerVT); 1478 if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits())) 1479 ShTy = InnerVT; 1480 SDValue NarrowShl = 1481 TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp, 1482 TLO.DAG.getConstant(ShAmt, ShTy)); 1483 return 1484 TLO.CombineTo(Op, 1485 TLO.DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(), 1486 NarrowShl)); 1487 } 1488 } 1489 1490 KnownZero <<= SA->getZExtValue(); 1491 KnownOne <<= SA->getZExtValue(); 1492 // low bits known zero. 1493 KnownZero |= APInt::getLowBitsSet(BitWidth, SA->getZExtValue()); 1494 } 1495 break; 1496 case ISD::SRL: 1497 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1498 EVT VT = Op.getValueType(); 1499 unsigned ShAmt = SA->getZExtValue(); 1500 unsigned VTSize = VT.getSizeInBits(); 1501 SDValue InOp = Op.getOperand(0); 1502 1503 // If the shift count is an invalid immediate, don't do anything. 1504 if (ShAmt >= BitWidth) 1505 break; 1506 1507 // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a 1508 // single shift. We can do this if the top bits (which are shifted out) 1509 // are never demanded. 1510 if (InOp.getOpcode() == ISD::SHL && 1511 isa<ConstantSDNode>(InOp.getOperand(1))) { 1512 if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) { 1513 unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue(); 1514 unsigned Opc = ISD::SRL; 1515 int Diff = ShAmt-C1; 1516 if (Diff < 0) { 1517 Diff = -Diff; 1518 Opc = ISD::SHL; 1519 } 1520 1521 SDValue NewSA = 1522 TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType()); 1523 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, 1524 InOp.getOperand(0), NewSA)); 1525 } 1526 } 1527 1528 // Compute the new bits that are at the top now. 1529 if (SimplifyDemandedBits(InOp, (NewMask << ShAmt), 1530 KnownZero, KnownOne, TLO, Depth+1)) 1531 return true; 1532 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1533 KnownZero = KnownZero.lshr(ShAmt); 1534 KnownOne = KnownOne.lshr(ShAmt); 1535 1536 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 1537 KnownZero |= HighBits; // High bits known zero. 1538 } 1539 break; 1540 case ISD::SRA: 1541 // If this is an arithmetic shift right and only the low-bit is set, we can 1542 // always convert this into a logical shr, even if the shift amount is 1543 // variable. The low bit of the shift cannot be an input sign bit unless 1544 // the shift amount is >= the size of the datatype, which is undefined. 1545 if (DemandedMask == 1) 1546 return TLO.CombineTo(Op, 1547 TLO.DAG.getNode(ISD::SRL, dl, Op.getValueType(), 1548 Op.getOperand(0), Op.getOperand(1))); 1549 1550 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1551 EVT VT = Op.getValueType(); 1552 unsigned ShAmt = SA->getZExtValue(); 1553 1554 // If the shift count is an invalid immediate, don't do anything. 1555 if (ShAmt >= BitWidth) 1556 break; 1557 1558 APInt InDemandedMask = (NewMask << ShAmt); 1559 1560 // If any of the demanded bits are produced by the sign extension, we also 1561 // demand the input sign bit. 1562 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 1563 if (HighBits.intersects(NewMask)) 1564 InDemandedMask |= APInt::getSignBit(VT.getScalarType().getSizeInBits()); 1565 1566 if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask, 1567 KnownZero, KnownOne, TLO, Depth+1)) 1568 return true; 1569 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1570 KnownZero = KnownZero.lshr(ShAmt); 1571 KnownOne = KnownOne.lshr(ShAmt); 1572 1573 // Handle the sign bit, adjusted to where it is now in the mask. 1574 APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt); 1575 1576 // If the input sign bit is known to be zero, or if none of the top bits 1577 // are demanded, turn this into an unsigned shift right. 1578 if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) { 1579 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, 1580 Op.getOperand(0), 1581 Op.getOperand(1))); 1582 } else if (KnownOne.intersects(SignBit)) { // New bits are known one. 1583 KnownOne |= HighBits; 1584 } 1585 } 1586 break; 1587 case ISD::SIGN_EXTEND_INREG: { 1588 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1589 1590 // Sign extension. Compute the demanded bits in the result that are not 1591 // present in the input. 1592 APInt NewBits = 1593 APInt::getHighBitsSet(BitWidth, 1594 BitWidth - EVT.getScalarType().getSizeInBits()); 1595 1596 // If none of the extended bits are demanded, eliminate the sextinreg. 1597 if ((NewBits & NewMask) == 0) 1598 return TLO.CombineTo(Op, Op.getOperand(0)); 1599 1600 APInt InSignBit = 1601 APInt::getSignBit(EVT.getScalarType().getSizeInBits()).zext(BitWidth); 1602 APInt InputDemandedBits = 1603 APInt::getLowBitsSet(BitWidth, 1604 EVT.getScalarType().getSizeInBits()) & 1605 NewMask; 1606 1607 // Since the sign extended bits are demanded, we know that the sign 1608 // bit is demanded. 1609 InputDemandedBits |= InSignBit; 1610 1611 if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits, 1612 KnownZero, KnownOne, TLO, Depth+1)) 1613 return true; 1614 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1615 1616 // If the sign bit of the input is known set or clear, then we know the 1617 // top bits of the result. 1618 1619 // If the input sign bit is known zero, convert this into a zero extension. 1620 if (KnownZero.intersects(InSignBit)) 1621 return TLO.CombineTo(Op, 1622 TLO.DAG.getZeroExtendInReg(Op.getOperand(0),dl,EVT)); 1623 1624 if (KnownOne.intersects(InSignBit)) { // Input sign bit known set 1625 KnownOne |= NewBits; 1626 KnownZero &= ~NewBits; 1627 } else { // Input sign bit unknown 1628 KnownZero &= ~NewBits; 1629 KnownOne &= ~NewBits; 1630 } 1631 break; 1632 } 1633 case ISD::ZERO_EXTEND: { 1634 unsigned OperandBitWidth = 1635 Op.getOperand(0).getValueType().getScalarType().getSizeInBits(); 1636 APInt InMask = NewMask.trunc(OperandBitWidth); 1637 1638 // If none of the top bits are demanded, convert this into an any_extend. 1639 APInt NewBits = 1640 APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask; 1641 if (!NewBits.intersects(NewMask)) 1642 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, 1643 Op.getValueType(), 1644 Op.getOperand(0))); 1645 1646 if (SimplifyDemandedBits(Op.getOperand(0), InMask, 1647 KnownZero, KnownOne, TLO, Depth+1)) 1648 return true; 1649 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1650 KnownZero = KnownZero.zext(BitWidth); 1651 KnownOne = KnownOne.zext(BitWidth); 1652 KnownZero |= NewBits; 1653 break; 1654 } 1655 case ISD::SIGN_EXTEND: { 1656 EVT InVT = Op.getOperand(0).getValueType(); 1657 unsigned InBits = InVT.getScalarType().getSizeInBits(); 1658 APInt InMask = APInt::getLowBitsSet(BitWidth, InBits); 1659 APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits); 1660 APInt NewBits = ~InMask & NewMask; 1661 1662 // If none of the top bits are demanded, convert this into an any_extend. 1663 if (NewBits == 0) 1664 return TLO.CombineTo(Op,TLO.DAG.getNode(ISD::ANY_EXTEND, dl, 1665 Op.getValueType(), 1666 Op.getOperand(0))); 1667 1668 // Since some of the sign extended bits are demanded, we know that the sign 1669 // bit is demanded. 1670 APInt InDemandedBits = InMask & NewMask; 1671 InDemandedBits |= InSignBit; 1672 InDemandedBits = InDemandedBits.trunc(InBits); 1673 1674 if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero, 1675 KnownOne, TLO, Depth+1)) 1676 return true; 1677 KnownZero = KnownZero.zext(BitWidth); 1678 KnownOne = KnownOne.zext(BitWidth); 1679 1680 // If the sign bit is known zero, convert this to a zero extend. 1681 if (KnownZero.intersects(InSignBit)) 1682 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, 1683 Op.getValueType(), 1684 Op.getOperand(0))); 1685 1686 // If the sign bit is known one, the top bits match. 1687 if (KnownOne.intersects(InSignBit)) { 1688 KnownOne |= NewBits; 1689 KnownZero &= ~NewBits; 1690 } else { // Otherwise, top bits aren't known. 1691 KnownOne &= ~NewBits; 1692 KnownZero &= ~NewBits; 1693 } 1694 break; 1695 } 1696 case ISD::ANY_EXTEND: { 1697 unsigned OperandBitWidth = 1698 Op.getOperand(0).getValueType().getScalarType().getSizeInBits(); 1699 APInt InMask = NewMask.trunc(OperandBitWidth); 1700 if (SimplifyDemandedBits(Op.getOperand(0), InMask, 1701 KnownZero, KnownOne, TLO, Depth+1)) 1702 return true; 1703 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1704 KnownZero = KnownZero.zext(BitWidth); 1705 KnownOne = KnownOne.zext(BitWidth); 1706 break; 1707 } 1708 case ISD::TRUNCATE: { 1709 // Simplify the input, using demanded bit information, and compute the known 1710 // zero/one bits live out. 1711 unsigned OperandBitWidth = 1712 Op.getOperand(0).getValueType().getScalarType().getSizeInBits(); 1713 APInt TruncMask = NewMask.zext(OperandBitWidth); 1714 if (SimplifyDemandedBits(Op.getOperand(0), TruncMask, 1715 KnownZero, KnownOne, TLO, Depth+1)) 1716 return true; 1717 KnownZero = KnownZero.trunc(BitWidth); 1718 KnownOne = KnownOne.trunc(BitWidth); 1719 1720 // If the input is only used by this truncate, see if we can shrink it based 1721 // on the known demanded bits. 1722 if (Op.getOperand(0).getNode()->hasOneUse()) { 1723 SDValue In = Op.getOperand(0); 1724 switch (In.getOpcode()) { 1725 default: break; 1726 case ISD::SRL: 1727 // Shrink SRL by a constant if none of the high bits shifted in are 1728 // demanded. 1729 if (TLO.LegalTypes() && 1730 !isTypeDesirableForOp(ISD::SRL, Op.getValueType())) 1731 // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is 1732 // undesirable. 1733 break; 1734 ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1)); 1735 if (!ShAmt) 1736 break; 1737 SDValue Shift = In.getOperand(1); 1738 if (TLO.LegalTypes()) { 1739 uint64_t ShVal = ShAmt->getZExtValue(); 1740 Shift = 1741 TLO.DAG.getConstant(ShVal, getShiftAmountTy(Op.getValueType())); 1742 } 1743 1744 APInt HighBits = APInt::getHighBitsSet(OperandBitWidth, 1745 OperandBitWidth - BitWidth); 1746 HighBits = HighBits.lshr(ShAmt->getZExtValue()).trunc(BitWidth); 1747 1748 if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) { 1749 // None of the shifted in bits are needed. Add a truncate of the 1750 // shift input, then shift it. 1751 SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl, 1752 Op.getValueType(), 1753 In.getOperand(0)); 1754 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, 1755 Op.getValueType(), 1756 NewTrunc, 1757 Shift)); 1758 } 1759 break; 1760 } 1761 } 1762 1763 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1764 break; 1765 } 1766 case ISD::AssertZext: { 1767 // Demand all the bits of the input that are demanded in the output. 1768 // The low bits are obvious; the high bits are demanded because we're 1769 // asserting that they're zero here. 1770 if (SimplifyDemandedBits(Op.getOperand(0), NewMask, 1771 KnownZero, KnownOne, TLO, Depth+1)) 1772 return true; 1773 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1774 1775 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1776 APInt InMask = APInt::getLowBitsSet(BitWidth, 1777 VT.getSizeInBits()); 1778 KnownZero |= ~InMask & NewMask; 1779 break; 1780 } 1781 case ISD::BITCAST: 1782 // If this is an FP->Int bitcast and if the sign bit is the only 1783 // thing demanded, turn this into a FGETSIGN. 1784 if (!Op.getOperand(0).getValueType().isVector() && 1785 NewMask == APInt::getSignBit(Op.getValueType().getSizeInBits()) && 1786 Op.getOperand(0).getValueType().isFloatingPoint()) { 1787 bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, Op.getValueType()); 1788 bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32); 1789 if ((OpVTLegal || i32Legal) && Op.getValueType().isSimple()) { 1790 EVT Ty = OpVTLegal ? Op.getValueType() : MVT::i32; 1791 // Make a FGETSIGN + SHL to move the sign bit into the appropriate 1792 // place. We expect the SHL to be eliminated by other optimizations. 1793 SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Op.getOperand(0)); 1794 unsigned OpVTSizeInBits = Op.getValueType().getSizeInBits(); 1795 if (!OpVTLegal && OpVTSizeInBits > 32) 1796 Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, Op.getValueType(), Sign); 1797 unsigned ShVal = Op.getValueType().getSizeInBits()-1; 1798 SDValue ShAmt = TLO.DAG.getConstant(ShVal, Op.getValueType()); 1799 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl, 1800 Op.getValueType(), 1801 Sign, ShAmt)); 1802 } 1803 } 1804 break; 1805 case ISD::ADD: 1806 case ISD::MUL: 1807 case ISD::SUB: { 1808 // Add, Sub, and Mul don't demand any bits in positions beyond that 1809 // of the highest bit demanded of them. 1810 APInt LoMask = APInt::getLowBitsSet(BitWidth, 1811 BitWidth - NewMask.countLeadingZeros()); 1812 if (SimplifyDemandedBits(Op.getOperand(0), LoMask, KnownZero2, 1813 KnownOne2, TLO, Depth+1)) 1814 return true; 1815 if (SimplifyDemandedBits(Op.getOperand(1), LoMask, KnownZero2, 1816 KnownOne2, TLO, Depth+1)) 1817 return true; 1818 // See if the operation should be performed at a smaller bit width. 1819 if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl)) 1820 return true; 1821 } 1822 // FALL THROUGH 1823 default: 1824 // Just use ComputeMaskedBits to compute output bits. 1825 TLO.DAG.ComputeMaskedBits(Op, NewMask, KnownZero, KnownOne, Depth); 1826 break; 1827 } 1828 1829 // If we know the value of all of the demanded bits, return this as a 1830 // constant. 1831 if ((NewMask & (KnownZero|KnownOne)) == NewMask) 1832 return TLO.CombineTo(Op, TLO.DAG.getConstant(KnownOne, Op.getValueType())); 1833 1834 return false; 1835 } 1836 1837 /// computeMaskedBitsForTargetNode - Determine which of the bits specified 1838 /// in Mask are known to be either zero or one and return them in the 1839 /// KnownZero/KnownOne bitsets. 1840 void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 1841 const APInt &Mask, 1842 APInt &KnownZero, 1843 APInt &KnownOne, 1844 const SelectionDAG &DAG, 1845 unsigned Depth) const { 1846 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 1847 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 1848 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 1849 Op.getOpcode() == ISD::INTRINSIC_VOID) && 1850 "Should use MaskedValueIsZero if you don't know whether Op" 1851 " is a target node!"); 1852 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 1853 } 1854 1855 /// ComputeNumSignBitsForTargetNode - This method can be implemented by 1856 /// targets that want to expose additional information about sign bits to the 1857 /// DAG Combiner. 1858 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 1859 unsigned Depth) const { 1860 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 1861 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 1862 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 1863 Op.getOpcode() == ISD::INTRINSIC_VOID) && 1864 "Should use ComputeNumSignBits if you don't know whether Op" 1865 " is a target node!"); 1866 return 1; 1867 } 1868 1869 /// ValueHasExactlyOneBitSet - Test if the given value is known to have exactly 1870 /// one bit set. This differs from ComputeMaskedBits in that it doesn't need to 1871 /// determine which bit is set. 1872 /// 1873 static bool ValueHasExactlyOneBitSet(SDValue Val, const SelectionDAG &DAG) { 1874 // A left-shift of a constant one will have exactly one bit set, because 1875 // shifting the bit off the end is undefined. 1876 if (Val.getOpcode() == ISD::SHL) 1877 if (ConstantSDNode *C = 1878 dyn_cast<ConstantSDNode>(Val.getNode()->getOperand(0))) 1879 if (C->getAPIntValue() == 1) 1880 return true; 1881 1882 // Similarly, a right-shift of a constant sign-bit will have exactly 1883 // one bit set. 1884 if (Val.getOpcode() == ISD::SRL) 1885 if (ConstantSDNode *C = 1886 dyn_cast<ConstantSDNode>(Val.getNode()->getOperand(0))) 1887 if (C->getAPIntValue().isSignBit()) 1888 return true; 1889 1890 // More could be done here, though the above checks are enough 1891 // to handle some common cases. 1892 1893 // Fall back to ComputeMaskedBits to catch other known cases. 1894 EVT OpVT = Val.getValueType(); 1895 unsigned BitWidth = OpVT.getScalarType().getSizeInBits(); 1896 APInt Mask = APInt::getAllOnesValue(BitWidth); 1897 APInt KnownZero, KnownOne; 1898 DAG.ComputeMaskedBits(Val, Mask, KnownZero, KnownOne); 1899 return (KnownZero.countPopulation() == BitWidth - 1) && 1900 (KnownOne.countPopulation() == 1); 1901 } 1902 1903 /// SimplifySetCC - Try to simplify a setcc built with the specified operands 1904 /// and cc. If it is unable to simplify it, return a null SDValue. 1905 SDValue 1906 TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, 1907 ISD::CondCode Cond, bool foldBooleans, 1908 DAGCombinerInfo &DCI, DebugLoc dl) const { 1909 SelectionDAG &DAG = DCI.DAG; 1910 1911 // These setcc operations always fold. 1912 switch (Cond) { 1913 default: break; 1914 case ISD::SETFALSE: 1915 case ISD::SETFALSE2: return DAG.getConstant(0, VT); 1916 case ISD::SETTRUE: 1917 case ISD::SETTRUE2: return DAG.getConstant(1, VT); 1918 } 1919 1920 // Ensure that the constant occurs on the RHS, and fold constant 1921 // comparisons. 1922 if (isa<ConstantSDNode>(N0.getNode())) 1923 return DAG.getSetCC(dl, VT, N1, N0, ISD::getSetCCSwappedOperands(Cond)); 1924 1925 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 1926 const APInt &C1 = N1C->getAPIntValue(); 1927 1928 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an 1929 // equality comparison, then we're just comparing whether X itself is 1930 // zero. 1931 if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) && 1932 N0.getOperand(0).getOpcode() == ISD::CTLZ && 1933 N0.getOperand(1).getOpcode() == ISD::Constant) { 1934 const APInt &ShAmt 1935 = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 1936 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 1937 ShAmt == Log2_32(N0.getValueType().getSizeInBits())) { 1938 if ((C1 == 0) == (Cond == ISD::SETEQ)) { 1939 // (srl (ctlz x), 5) == 0 -> X != 0 1940 // (srl (ctlz x), 5) != 1 -> X != 0 1941 Cond = ISD::SETNE; 1942 } else { 1943 // (srl (ctlz x), 5) != 0 -> X == 0 1944 // (srl (ctlz x), 5) == 1 -> X == 0 1945 Cond = ISD::SETEQ; 1946 } 1947 SDValue Zero = DAG.getConstant(0, N0.getValueType()); 1948 return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0), 1949 Zero, Cond); 1950 } 1951 } 1952 1953 SDValue CTPOP = N0; 1954 // Look through truncs that don't change the value of a ctpop. 1955 if (N0.hasOneUse() && N0.getOpcode() == ISD::TRUNCATE) 1956 CTPOP = N0.getOperand(0); 1957 1958 if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP && 1959 (N0 == CTPOP || N0.getValueType().getSizeInBits() > 1960 Log2_32_Ceil(CTPOP.getValueType().getSizeInBits()))) { 1961 EVT CTVT = CTPOP.getValueType(); 1962 SDValue CTOp = CTPOP.getOperand(0); 1963 1964 // (ctpop x) u< 2 -> (x & x-1) == 0 1965 // (ctpop x) u> 1 -> (x & x-1) != 0 1966 if ((Cond == ISD::SETULT && C1 == 2) || (Cond == ISD::SETUGT && C1 == 1)){ 1967 SDValue Sub = DAG.getNode(ISD::SUB, dl, CTVT, CTOp, 1968 DAG.getConstant(1, CTVT)); 1969 SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Sub); 1970 ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE; 1971 return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, CTVT), CC); 1972 } 1973 1974 // TODO: (ctpop x) == 1 -> x && (x & x-1) == 0 iff ctpop is illegal. 1975 } 1976 1977 // (zext x) == C --> x == (trunc C) 1978 if (DCI.isBeforeLegalize() && N0->hasOneUse() && 1979 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 1980 unsigned MinBits = N0.getValueSizeInBits(); 1981 SDValue PreZExt; 1982 if (N0->getOpcode() == ISD::ZERO_EXTEND) { 1983 // ZExt 1984 MinBits = N0->getOperand(0).getValueSizeInBits(); 1985 PreZExt = N0->getOperand(0); 1986 } else if (N0->getOpcode() == ISD::AND) { 1987 // DAGCombine turns costly ZExts into ANDs 1988 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0->getOperand(1))) 1989 if ((C->getAPIntValue()+1).isPowerOf2()) { 1990 MinBits = C->getAPIntValue().countTrailingOnes(); 1991 PreZExt = N0->getOperand(0); 1992 } 1993 } else if (LoadSDNode *LN0 = dyn_cast<LoadSDNode>(N0)) { 1994 // ZEXTLOAD 1995 if (LN0->getExtensionType() == ISD::ZEXTLOAD) { 1996 MinBits = LN0->getMemoryVT().getSizeInBits(); 1997 PreZExt = N0; 1998 } 1999 } 2000 2001 // Make sure we're not loosing bits from the constant. 2002 if (MinBits < C1.getBitWidth() && MinBits > C1.getActiveBits()) { 2003 EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits); 2004 if (isTypeDesirableForOp(ISD::SETCC, MinVT)) { 2005 // Will get folded away. 2006 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreZExt); 2007 SDValue C = DAG.getConstant(C1.trunc(MinBits), MinVT); 2008 return DAG.getSetCC(dl, VT, Trunc, C, Cond); 2009 } 2010 } 2011 } 2012 2013 // If the LHS is '(and load, const)', the RHS is 0, 2014 // the test is for equality or unsigned, and all 1 bits of the const are 2015 // in the same partial word, see if we can shorten the load. 2016 if (DCI.isBeforeLegalize() && 2017 N0.getOpcode() == ISD::AND && C1 == 0 && 2018 N0.getNode()->hasOneUse() && 2019 isa<LoadSDNode>(N0.getOperand(0)) && 2020 N0.getOperand(0).getNode()->hasOneUse() && 2021 isa<ConstantSDNode>(N0.getOperand(1))) { 2022 LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0)); 2023 APInt bestMask; 2024 unsigned bestWidth = 0, bestOffset = 0; 2025 if (!Lod->isVolatile() && Lod->isUnindexed()) { 2026 unsigned origWidth = N0.getValueType().getSizeInBits(); 2027 unsigned maskWidth = origWidth; 2028 // We can narrow (e.g.) 16-bit extending loads on 32-bit target to 2029 // 8 bits, but have to be careful... 2030 if (Lod->getExtensionType() != ISD::NON_EXTLOAD) 2031 origWidth = Lod->getMemoryVT().getSizeInBits(); 2032 const APInt &Mask = 2033 cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 2034 for (unsigned width = origWidth / 2; width>=8; width /= 2) { 2035 APInt newMask = APInt::getLowBitsSet(maskWidth, width); 2036 for (unsigned offset=0; offset<origWidth/width; offset++) { 2037 if ((newMask & Mask) == Mask) { 2038 if (!TD->isLittleEndian()) 2039 bestOffset = (origWidth/width - offset - 1) * (width/8); 2040 else 2041 bestOffset = (uint64_t)offset * (width/8); 2042 bestMask = Mask.lshr(offset * (width/8) * 8); 2043 bestWidth = width; 2044 break; 2045 } 2046 newMask = newMask << width; 2047 } 2048 } 2049 } 2050 if (bestWidth) { 2051 EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth); 2052 if (newVT.isRound()) { 2053 EVT PtrType = Lod->getOperand(1).getValueType(); 2054 SDValue Ptr = Lod->getBasePtr(); 2055 if (bestOffset != 0) 2056 Ptr = DAG.getNode(ISD::ADD, dl, PtrType, Lod->getBasePtr(), 2057 DAG.getConstant(bestOffset, PtrType)); 2058 unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset); 2059 SDValue NewLoad = DAG.getLoad(newVT, dl, Lod->getChain(), Ptr, 2060 Lod->getPointerInfo().getWithOffset(bestOffset), 2061 false, false, NewAlign); 2062 return DAG.getSetCC(dl, VT, 2063 DAG.getNode(ISD::AND, dl, newVT, NewLoad, 2064 DAG.getConstant(bestMask.trunc(bestWidth), 2065 newVT)), 2066 DAG.getConstant(0LL, newVT), Cond); 2067 } 2068 } 2069 } 2070 2071 // If the LHS is a ZERO_EXTEND, perform the comparison on the input. 2072 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 2073 unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits(); 2074 2075 // If the comparison constant has bits in the upper part, the 2076 // zero-extended value could never match. 2077 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(), 2078 C1.getBitWidth() - InSize))) { 2079 switch (Cond) { 2080 case ISD::SETUGT: 2081 case ISD::SETUGE: 2082 case ISD::SETEQ: return DAG.getConstant(0, VT); 2083 case ISD::SETULT: 2084 case ISD::SETULE: 2085 case ISD::SETNE: return DAG.getConstant(1, VT); 2086 case ISD::SETGT: 2087 case ISD::SETGE: 2088 // True if the sign bit of C1 is set. 2089 return DAG.getConstant(C1.isNegative(), VT); 2090 case ISD::SETLT: 2091 case ISD::SETLE: 2092 // True if the sign bit of C1 isn't set. 2093 return DAG.getConstant(C1.isNonNegative(), VT); 2094 default: 2095 break; 2096 } 2097 } 2098 2099 // Otherwise, we can perform the comparison with the low bits. 2100 switch (Cond) { 2101 case ISD::SETEQ: 2102 case ISD::SETNE: 2103 case ISD::SETUGT: 2104 case ISD::SETUGE: 2105 case ISD::SETULT: 2106 case ISD::SETULE: { 2107 EVT newVT = N0.getOperand(0).getValueType(); 2108 if (DCI.isBeforeLegalizeOps() || 2109 (isOperationLegal(ISD::SETCC, newVT) && 2110 getCondCodeAction(Cond, newVT)==Legal)) 2111 return DAG.getSetCC(dl, VT, N0.getOperand(0), 2112 DAG.getConstant(C1.trunc(InSize), newVT), 2113 Cond); 2114 break; 2115 } 2116 default: 2117 break; // todo, be more careful with signed comparisons 2118 } 2119 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 2120 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 2121 EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT(); 2122 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits(); 2123 EVT ExtDstTy = N0.getValueType(); 2124 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits(); 2125 2126 // If the constant doesn't fit into the number of bits for the source of 2127 // the sign extension, it is impossible for both sides to be equal. 2128 if (C1.getMinSignedBits() > ExtSrcTyBits) 2129 return DAG.getConstant(Cond == ISD::SETNE, VT); 2130 2131 SDValue ZextOp; 2132 EVT Op0Ty = N0.getOperand(0).getValueType(); 2133 if (Op0Ty == ExtSrcTy) { 2134 ZextOp = N0.getOperand(0); 2135 } else { 2136 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits); 2137 ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0), 2138 DAG.getConstant(Imm, Op0Ty)); 2139 } 2140 if (!DCI.isCalledByLegalizer()) 2141 DCI.AddToWorklist(ZextOp.getNode()); 2142 // Otherwise, make this a use of a zext. 2143 return DAG.getSetCC(dl, VT, ZextOp, 2144 DAG.getConstant(C1 & APInt::getLowBitsSet( 2145 ExtDstTyBits, 2146 ExtSrcTyBits), 2147 ExtDstTy), 2148 Cond); 2149 } else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) && 2150 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 2151 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC 2152 if (N0.getOpcode() == ISD::SETCC && 2153 isTypeLegal(VT) && VT.bitsLE(N0.getValueType())) { 2154 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getAPIntValue() != 1); 2155 if (TrueWhenTrue) 2156 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0); 2157 // Invert the condition. 2158 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 2159 CC = ISD::getSetCCInverse(CC, 2160 N0.getOperand(0).getValueType().isInteger()); 2161 return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC); 2162 } 2163 2164 if ((N0.getOpcode() == ISD::XOR || 2165 (N0.getOpcode() == ISD::AND && 2166 N0.getOperand(0).getOpcode() == ISD::XOR && 2167 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) && 2168 isa<ConstantSDNode>(N0.getOperand(1)) && 2169 cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) { 2170 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We 2171 // can only do this if the top bits are known zero. 2172 unsigned BitWidth = N0.getValueSizeInBits(); 2173 if (DAG.MaskedValueIsZero(N0, 2174 APInt::getHighBitsSet(BitWidth, 2175 BitWidth-1))) { 2176 // Okay, get the un-inverted input value. 2177 SDValue Val; 2178 if (N0.getOpcode() == ISD::XOR) 2179 Val = N0.getOperand(0); 2180 else { 2181 assert(N0.getOpcode() == ISD::AND && 2182 N0.getOperand(0).getOpcode() == ISD::XOR); 2183 // ((X^1)&1)^1 -> X & 1 2184 Val = DAG.getNode(ISD::AND, dl, N0.getValueType(), 2185 N0.getOperand(0).getOperand(0), 2186 N0.getOperand(1)); 2187 } 2188 2189 return DAG.getSetCC(dl, VT, Val, N1, 2190 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 2191 } 2192 } else if (N1C->getAPIntValue() == 1 && 2193 (VT == MVT::i1 || 2194 getBooleanContents() == ZeroOrOneBooleanContent)) { 2195 SDValue Op0 = N0; 2196 if (Op0.getOpcode() == ISD::TRUNCATE) 2197 Op0 = Op0.getOperand(0); 2198 2199 if ((Op0.getOpcode() == ISD::XOR) && 2200 Op0.getOperand(0).getOpcode() == ISD::SETCC && 2201 Op0.getOperand(1).getOpcode() == ISD::SETCC) { 2202 // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc) 2203 Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ; 2204 return DAG.getSetCC(dl, VT, Op0.getOperand(0), Op0.getOperand(1), 2205 Cond); 2206 } else if (Op0.getOpcode() == ISD::AND && 2207 isa<ConstantSDNode>(Op0.getOperand(1)) && 2208 cast<ConstantSDNode>(Op0.getOperand(1))->getAPIntValue() == 1) { 2209 // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0. 2210 if (Op0.getValueType().bitsGT(VT)) 2211 Op0 = DAG.getNode(ISD::AND, dl, VT, 2212 DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)), 2213 DAG.getConstant(1, VT)); 2214 else if (Op0.getValueType().bitsLT(VT)) 2215 Op0 = DAG.getNode(ISD::AND, dl, VT, 2216 DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)), 2217 DAG.getConstant(1, VT)); 2218 2219 return DAG.getSetCC(dl, VT, Op0, 2220 DAG.getConstant(0, Op0.getValueType()), 2221 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 2222 } 2223 } 2224 } 2225 2226 APInt MinVal, MaxVal; 2227 unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits(); 2228 if (ISD::isSignedIntSetCC(Cond)) { 2229 MinVal = APInt::getSignedMinValue(OperandBitSize); 2230 MaxVal = APInt::getSignedMaxValue(OperandBitSize); 2231 } else { 2232 MinVal = APInt::getMinValue(OperandBitSize); 2233 MaxVal = APInt::getMaxValue(OperandBitSize); 2234 } 2235 2236 // Canonicalize GE/LE comparisons to use GT/LT comparisons. 2237 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) { 2238 if (C1 == MinVal) return DAG.getConstant(1, VT); // X >= MIN --> true 2239 // X >= C0 --> X > (C0-1) 2240 return DAG.getSetCC(dl, VT, N0, 2241 DAG.getConstant(C1-1, N1.getValueType()), 2242 (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT); 2243 } 2244 2245 if (Cond == ISD::SETLE || Cond == ISD::SETULE) { 2246 if (C1 == MaxVal) return DAG.getConstant(1, VT); // X <= MAX --> true 2247 // X <= C0 --> X < (C0+1) 2248 return DAG.getSetCC(dl, VT, N0, 2249 DAG.getConstant(C1+1, N1.getValueType()), 2250 (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT); 2251 } 2252 2253 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal) 2254 return DAG.getConstant(0, VT); // X < MIN --> false 2255 if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal) 2256 return DAG.getConstant(1, VT); // X >= MIN --> true 2257 if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal) 2258 return DAG.getConstant(0, VT); // X > MAX --> false 2259 if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal) 2260 return DAG.getConstant(1, VT); // X <= MAX --> true 2261 2262 // Canonicalize setgt X, Min --> setne X, Min 2263 if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal) 2264 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 2265 // Canonicalize setlt X, Max --> setne X, Max 2266 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal) 2267 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 2268 2269 // If we have setult X, 1, turn it into seteq X, 0 2270 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1) 2271 return DAG.getSetCC(dl, VT, N0, 2272 DAG.getConstant(MinVal, N0.getValueType()), 2273 ISD::SETEQ); 2274 // If we have setugt X, Max-1, turn it into seteq X, Max 2275 else if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1) 2276 return DAG.getSetCC(dl, VT, N0, 2277 DAG.getConstant(MaxVal, N0.getValueType()), 2278 ISD::SETEQ); 2279 2280 // If we have "setcc X, C0", check to see if we can shrink the immediate 2281 // by changing cc. 2282 2283 // SETUGT X, SINTMAX -> SETLT X, 0 2284 if (Cond == ISD::SETUGT && 2285 C1 == APInt::getSignedMaxValue(OperandBitSize)) 2286 return DAG.getSetCC(dl, VT, N0, 2287 DAG.getConstant(0, N1.getValueType()), 2288 ISD::SETLT); 2289 2290 // SETULT X, SINTMIN -> SETGT X, -1 2291 if (Cond == ISD::SETULT && 2292 C1 == APInt::getSignedMinValue(OperandBitSize)) { 2293 SDValue ConstMinusOne = 2294 DAG.getConstant(APInt::getAllOnesValue(OperandBitSize), 2295 N1.getValueType()); 2296 return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT); 2297 } 2298 2299 // Fold bit comparisons when we can. 2300 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 2301 (VT == N0.getValueType() || 2302 (isTypeLegal(VT) && VT.bitsLE(N0.getValueType()))) && 2303 N0.getOpcode() == ISD::AND) 2304 if (ConstantSDNode *AndRHS = 2305 dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 2306 EVT ShiftTy = DCI.isBeforeLegalize() ? 2307 getPointerTy() : getShiftAmountTy(N0.getValueType()); 2308 if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3 2309 // Perform the xform if the AND RHS is a single bit. 2310 if (AndRHS->getAPIntValue().isPowerOf2()) { 2311 return DAG.getNode(ISD::TRUNCATE, dl, VT, 2312 DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0, 2313 DAG.getConstant(AndRHS->getAPIntValue().logBase2(), ShiftTy))); 2314 } 2315 } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) { 2316 // (X & 8) == 8 --> (X & 8) >> 3 2317 // Perform the xform if C1 is a single bit. 2318 if (C1.isPowerOf2()) { 2319 return DAG.getNode(ISD::TRUNCATE, dl, VT, 2320 DAG.getNode(ISD::SRL, dl, N0.getValueType(), N0, 2321 DAG.getConstant(C1.logBase2(), ShiftTy))); 2322 } 2323 } 2324 } 2325 } 2326 2327 if (isa<ConstantFPSDNode>(N0.getNode())) { 2328 // Constant fold or commute setcc. 2329 SDValue O = DAG.FoldSetCC(VT, N0, N1, Cond, dl); 2330 if (O.getNode()) return O; 2331 } else if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.getNode())) { 2332 // If the RHS of an FP comparison is a constant, simplify it away in 2333 // some cases. 2334 if (CFP->getValueAPF().isNaN()) { 2335 // If an operand is known to be a nan, we can fold it. 2336 switch (ISD::getUnorderedFlavor(Cond)) { 2337 default: llvm_unreachable("Unknown flavor!"); 2338 case 0: // Known false. 2339 return DAG.getConstant(0, VT); 2340 case 1: // Known true. 2341 return DAG.getConstant(1, VT); 2342 case 2: // Undefined. 2343 return DAG.getUNDEF(VT); 2344 } 2345 } 2346 2347 // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the 2348 // constant if knowing that the operand is non-nan is enough. We prefer to 2349 // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to 2350 // materialize 0.0. 2351 if (Cond == ISD::SETO || Cond == ISD::SETUO) 2352 return DAG.getSetCC(dl, VT, N0, N0, Cond); 2353 2354 // If the condition is not legal, see if we can find an equivalent one 2355 // which is legal. 2356 if (!isCondCodeLegal(Cond, N0.getValueType())) { 2357 // If the comparison was an awkward floating-point == or != and one of 2358 // the comparison operands is infinity or negative infinity, convert the 2359 // condition to a less-awkward <= or >=. 2360 if (CFP->getValueAPF().isInfinity()) { 2361 if (CFP->getValueAPF().isNegative()) { 2362 if (Cond == ISD::SETOEQ && 2363 isCondCodeLegal(ISD::SETOLE, N0.getValueType())) 2364 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLE); 2365 if (Cond == ISD::SETUEQ && 2366 isCondCodeLegal(ISD::SETOLE, N0.getValueType())) 2367 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULE); 2368 if (Cond == ISD::SETUNE && 2369 isCondCodeLegal(ISD::SETUGT, N0.getValueType())) 2370 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGT); 2371 if (Cond == ISD::SETONE && 2372 isCondCodeLegal(ISD::SETUGT, N0.getValueType())) 2373 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGT); 2374 } else { 2375 if (Cond == ISD::SETOEQ && 2376 isCondCodeLegal(ISD::SETOGE, N0.getValueType())) 2377 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGE); 2378 if (Cond == ISD::SETUEQ && 2379 isCondCodeLegal(ISD::SETOGE, N0.getValueType())) 2380 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGE); 2381 if (Cond == ISD::SETUNE && 2382 isCondCodeLegal(ISD::SETULT, N0.getValueType())) 2383 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULT); 2384 if (Cond == ISD::SETONE && 2385 isCondCodeLegal(ISD::SETULT, N0.getValueType())) 2386 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLT); 2387 } 2388 } 2389 } 2390 } 2391 2392 if (N0 == N1) { 2393 // We can always fold X == X for integer setcc's. 2394 if (N0.getValueType().isInteger()) 2395 return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT); 2396 unsigned UOF = ISD::getUnorderedFlavor(Cond); 2397 if (UOF == 2) // FP operators that are undefined on NaNs. 2398 return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT); 2399 if (UOF == unsigned(ISD::isTrueWhenEqual(Cond))) 2400 return DAG.getConstant(UOF, VT); 2401 // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO 2402 // if it is not already. 2403 ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO; 2404 if (NewCond != Cond) 2405 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 2406 } 2407 2408 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 2409 N0.getValueType().isInteger()) { 2410 if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB || 2411 N0.getOpcode() == ISD::XOR) { 2412 // Simplify (X+Y) == (X+Z) --> Y == Z 2413 if (N0.getOpcode() == N1.getOpcode()) { 2414 if (N0.getOperand(0) == N1.getOperand(0)) 2415 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond); 2416 if (N0.getOperand(1) == N1.getOperand(1)) 2417 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond); 2418 if (DAG.isCommutativeBinOp(N0.getOpcode())) { 2419 // If X op Y == Y op X, try other combinations. 2420 if (N0.getOperand(0) == N1.getOperand(1)) 2421 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0), 2422 Cond); 2423 if (N0.getOperand(1) == N1.getOperand(0)) 2424 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1), 2425 Cond); 2426 } 2427 } 2428 2429 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) { 2430 if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 2431 // Turn (X+C1) == C2 --> X == C2-C1 2432 if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) { 2433 return DAG.getSetCC(dl, VT, N0.getOperand(0), 2434 DAG.getConstant(RHSC->getAPIntValue()- 2435 LHSR->getAPIntValue(), 2436 N0.getValueType()), Cond); 2437 } 2438 2439 // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0. 2440 if (N0.getOpcode() == ISD::XOR) 2441 // If we know that all of the inverted bits are zero, don't bother 2442 // performing the inversion. 2443 if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue())) 2444 return 2445 DAG.getSetCC(dl, VT, N0.getOperand(0), 2446 DAG.getConstant(LHSR->getAPIntValue() ^ 2447 RHSC->getAPIntValue(), 2448 N0.getValueType()), 2449 Cond); 2450 } 2451 2452 // Turn (C1-X) == C2 --> X == C1-C2 2453 if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) { 2454 if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) { 2455 return 2456 DAG.getSetCC(dl, VT, N0.getOperand(1), 2457 DAG.getConstant(SUBC->getAPIntValue() - 2458 RHSC->getAPIntValue(), 2459 N0.getValueType()), 2460 Cond); 2461 } 2462 } 2463 } 2464 2465 // Simplify (X+Z) == X --> Z == 0 2466 if (N0.getOperand(0) == N1) 2467 return DAG.getSetCC(dl, VT, N0.getOperand(1), 2468 DAG.getConstant(0, N0.getValueType()), Cond); 2469 if (N0.getOperand(1) == N1) { 2470 if (DAG.isCommutativeBinOp(N0.getOpcode())) 2471 return DAG.getSetCC(dl, VT, N0.getOperand(0), 2472 DAG.getConstant(0, N0.getValueType()), Cond); 2473 else if (N0.getNode()->hasOneUse()) { 2474 assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!"); 2475 // (Z-X) == X --> Z == X<<1 2476 SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(), 2477 N1, 2478 DAG.getConstant(1, getShiftAmountTy(N1.getValueType()))); 2479 if (!DCI.isCalledByLegalizer()) 2480 DCI.AddToWorklist(SH.getNode()); 2481 return DAG.getSetCC(dl, VT, N0.getOperand(0), SH, Cond); 2482 } 2483 } 2484 } 2485 2486 if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB || 2487 N1.getOpcode() == ISD::XOR) { 2488 // Simplify X == (X+Z) --> Z == 0 2489 if (N1.getOperand(0) == N0) { 2490 return DAG.getSetCC(dl, VT, N1.getOperand(1), 2491 DAG.getConstant(0, N1.getValueType()), Cond); 2492 } else if (N1.getOperand(1) == N0) { 2493 if (DAG.isCommutativeBinOp(N1.getOpcode())) { 2494 return DAG.getSetCC(dl, VT, N1.getOperand(0), 2495 DAG.getConstant(0, N1.getValueType()), Cond); 2496 } else if (N1.getNode()->hasOneUse()) { 2497 assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!"); 2498 // X == (Z-X) --> X<<1 == Z 2499 SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(), N0, 2500 DAG.getConstant(1, getShiftAmountTy(N0.getValueType()))); 2501 if (!DCI.isCalledByLegalizer()) 2502 DCI.AddToWorklist(SH.getNode()); 2503 return DAG.getSetCC(dl, VT, SH, N1.getOperand(0), Cond); 2504 } 2505 } 2506 } 2507 2508 // Simplify x&y == y to x&y != 0 if y has exactly one bit set. 2509 // Note that where y is variable and is known to have at most 2510 // one bit set (for example, if it is z&1) we cannot do this; 2511 // the expressions are not equivalent when y==0. 2512 if (N0.getOpcode() == ISD::AND) 2513 if (N0.getOperand(0) == N1 || N0.getOperand(1) == N1) { 2514 if (ValueHasExactlyOneBitSet(N1, DAG)) { 2515 Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true); 2516 SDValue Zero = DAG.getConstant(0, N1.getValueType()); 2517 return DAG.getSetCC(dl, VT, N0, Zero, Cond); 2518 } 2519 } 2520 if (N1.getOpcode() == ISD::AND) 2521 if (N1.getOperand(0) == N0 || N1.getOperand(1) == N0) { 2522 if (ValueHasExactlyOneBitSet(N0, DAG)) { 2523 Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true); 2524 SDValue Zero = DAG.getConstant(0, N0.getValueType()); 2525 return DAG.getSetCC(dl, VT, N1, Zero, Cond); 2526 } 2527 } 2528 } 2529 2530 // Fold away ALL boolean setcc's. 2531 SDValue Temp; 2532 if (N0.getValueType() == MVT::i1 && foldBooleans) { 2533 switch (Cond) { 2534 default: llvm_unreachable("Unknown integer setcc!"); 2535 case ISD::SETEQ: // X == Y -> ~(X^Y) 2536 Temp = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1); 2537 N0 = DAG.getNOT(dl, Temp, MVT::i1); 2538 if (!DCI.isCalledByLegalizer()) 2539 DCI.AddToWorklist(Temp.getNode()); 2540 break; 2541 case ISD::SETNE: // X != Y --> (X^Y) 2542 N0 = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1); 2543 break; 2544 case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y 2545 case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y 2546 Temp = DAG.getNOT(dl, N0, MVT::i1); 2547 N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N1, Temp); 2548 if (!DCI.isCalledByLegalizer()) 2549 DCI.AddToWorklist(Temp.getNode()); 2550 break; 2551 case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X 2552 case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X 2553 Temp = DAG.getNOT(dl, N1, MVT::i1); 2554 N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N0, Temp); 2555 if (!DCI.isCalledByLegalizer()) 2556 DCI.AddToWorklist(Temp.getNode()); 2557 break; 2558 case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y 2559 case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y 2560 Temp = DAG.getNOT(dl, N0, MVT::i1); 2561 N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N1, Temp); 2562 if (!DCI.isCalledByLegalizer()) 2563 DCI.AddToWorklist(Temp.getNode()); 2564 break; 2565 case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X 2566 case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X 2567 Temp = DAG.getNOT(dl, N1, MVT::i1); 2568 N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N0, Temp); 2569 break; 2570 } 2571 if (VT != MVT::i1) { 2572 if (!DCI.isCalledByLegalizer()) 2573 DCI.AddToWorklist(N0.getNode()); 2574 // FIXME: If running after legalize, we probably can't do this. 2575 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, N0); 2576 } 2577 return N0; 2578 } 2579 2580 // Could not fold it. 2581 return SDValue(); 2582 } 2583 2584 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 2585 /// node is a GlobalAddress + offset. 2586 bool TargetLowering::isGAPlusOffset(SDNode *N, const GlobalValue *&GA, 2587 int64_t &Offset) const { 2588 if (isa<GlobalAddressSDNode>(N)) { 2589 GlobalAddressSDNode *GASD = cast<GlobalAddressSDNode>(N); 2590 GA = GASD->getGlobal(); 2591 Offset += GASD->getOffset(); 2592 return true; 2593 } 2594 2595 if (N->getOpcode() == ISD::ADD) { 2596 SDValue N1 = N->getOperand(0); 2597 SDValue N2 = N->getOperand(1); 2598 if (isGAPlusOffset(N1.getNode(), GA, Offset)) { 2599 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2); 2600 if (V) { 2601 Offset += V->getSExtValue(); 2602 return true; 2603 } 2604 } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) { 2605 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1); 2606 if (V) { 2607 Offset += V->getSExtValue(); 2608 return true; 2609 } 2610 } 2611 } 2612 2613 return false; 2614 } 2615 2616 2617 SDValue TargetLowering:: 2618 PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { 2619 // Default implementation: no optimization. 2620 return SDValue(); 2621 } 2622 2623 //===----------------------------------------------------------------------===// 2624 // Inline Assembler Implementation Methods 2625 //===----------------------------------------------------------------------===// 2626 2627 2628 TargetLowering::ConstraintType 2629 TargetLowering::getConstraintType(const std::string &Constraint) const { 2630 if (Constraint.size() == 1) { 2631 switch (Constraint[0]) { 2632 default: break; 2633 case 'r': return C_RegisterClass; 2634 case 'm': // memory 2635 case 'o': // offsetable 2636 case 'V': // not offsetable 2637 return C_Memory; 2638 case 'i': // Simple Integer or Relocatable Constant 2639 case 'n': // Simple Integer 2640 case 'E': // Floating Point Constant 2641 case 'F': // Floating Point Constant 2642 case 's': // Relocatable Constant 2643 case 'p': // Address. 2644 case 'X': // Allow ANY value. 2645 case 'I': // Target registers. 2646 case 'J': 2647 case 'K': 2648 case 'L': 2649 case 'M': 2650 case 'N': 2651 case 'O': 2652 case 'P': 2653 case '<': 2654 case '>': 2655 return C_Other; 2656 } 2657 } 2658 2659 if (Constraint.size() > 1 && Constraint[0] == '{' && 2660 Constraint[Constraint.size()-1] == '}') 2661 return C_Register; 2662 return C_Unknown; 2663 } 2664 2665 /// LowerXConstraint - try to replace an X constraint, which matches anything, 2666 /// with another that has more specific requirements based on the type of the 2667 /// corresponding operand. 2668 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const{ 2669 if (ConstraintVT.isInteger()) 2670 return "r"; 2671 if (ConstraintVT.isFloatingPoint()) 2672 return "f"; // works for many targets 2673 return 0; 2674 } 2675 2676 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 2677 /// vector. If it is invalid, don't add anything to Ops. 2678 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 2679 std::string &Constraint, 2680 std::vector<SDValue> &Ops, 2681 SelectionDAG &DAG) const { 2682 2683 if (Constraint.length() > 1) return; 2684 2685 char ConstraintLetter = Constraint[0]; 2686 switch (ConstraintLetter) { 2687 default: break; 2688 case 'X': // Allows any operand; labels (basic block) use this. 2689 if (Op.getOpcode() == ISD::BasicBlock) { 2690 Ops.push_back(Op); 2691 return; 2692 } 2693 // fall through 2694 case 'i': // Simple Integer or Relocatable Constant 2695 case 'n': // Simple Integer 2696 case 's': { // Relocatable Constant 2697 // These operands are interested in values of the form (GV+C), where C may 2698 // be folded in as an offset of GV, or it may be explicitly added. Also, it 2699 // is possible and fine if either GV or C are missing. 2700 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 2701 GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op); 2702 2703 // If we have "(add GV, C)", pull out GV/C 2704 if (Op.getOpcode() == ISD::ADD) { 2705 C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 2706 GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0)); 2707 if (C == 0 || GA == 0) { 2708 C = dyn_cast<ConstantSDNode>(Op.getOperand(0)); 2709 GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1)); 2710 } 2711 if (C == 0 || GA == 0) 2712 C = 0, GA = 0; 2713 } 2714 2715 // If we find a valid operand, map to the TargetXXX version so that the 2716 // value itself doesn't get selected. 2717 if (GA) { // Either &GV or &GV+C 2718 if (ConstraintLetter != 'n') { 2719 int64_t Offs = GA->getOffset(); 2720 if (C) Offs += C->getZExtValue(); 2721 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), 2722 C ? C->getDebugLoc() : DebugLoc(), 2723 Op.getValueType(), Offs)); 2724 return; 2725 } 2726 } 2727 if (C) { // just C, no GV. 2728 // Simple constants are not allowed for 's'. 2729 if (ConstraintLetter != 's') { 2730 // gcc prints these as sign extended. Sign extend value to 64 bits 2731 // now; without this it would get ZExt'd later in 2732 // ScheduleDAGSDNodes::EmitNode, which is very generic. 2733 Ops.push_back(DAG.getTargetConstant(C->getAPIntValue().getSExtValue(), 2734 MVT::i64)); 2735 return; 2736 } 2737 } 2738 break; 2739 } 2740 } 2741 } 2742 2743 std::pair<unsigned, const TargetRegisterClass*> TargetLowering:: 2744 getRegForInlineAsmConstraint(const std::string &Constraint, 2745 EVT VT) const { 2746 if (Constraint[0] != '{') 2747 return std::make_pair(0u, static_cast<TargetRegisterClass*>(0)); 2748 assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?"); 2749 2750 // Remove the braces from around the name. 2751 StringRef RegName(Constraint.data()+1, Constraint.size()-2); 2752 2753 // Figure out which register class contains this reg. 2754 const TargetRegisterInfo *RI = TM.getRegisterInfo(); 2755 for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(), 2756 E = RI->regclass_end(); RCI != E; ++RCI) { 2757 const TargetRegisterClass *RC = *RCI; 2758 2759 // If none of the value types for this register class are valid, we 2760 // can't use it. For example, 64-bit reg classes on 32-bit targets. 2761 bool isLegal = false; 2762 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end(); 2763 I != E; ++I) { 2764 if (isTypeLegal(*I)) { 2765 isLegal = true; 2766 break; 2767 } 2768 } 2769 2770 if (!isLegal) continue; 2771 2772 for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end(); 2773 I != E; ++I) { 2774 if (RegName.equals_lower(RI->getName(*I))) 2775 return std::make_pair(*I, RC); 2776 } 2777 } 2778 2779 return std::make_pair(0u, static_cast<const TargetRegisterClass*>(0)); 2780 } 2781 2782 //===----------------------------------------------------------------------===// 2783 // Constraint Selection. 2784 2785 /// isMatchingInputConstraint - Return true of this is an input operand that is 2786 /// a matching constraint like "4". 2787 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const { 2788 assert(!ConstraintCode.empty() && "No known constraint!"); 2789 return isdigit(ConstraintCode[0]); 2790 } 2791 2792 /// getMatchedOperand - If this is an input matching constraint, this method 2793 /// returns the output operand it matches. 2794 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const { 2795 assert(!ConstraintCode.empty() && "No known constraint!"); 2796 return atoi(ConstraintCode.c_str()); 2797 } 2798 2799 2800 /// ParseConstraints - Split up the constraint string from the inline 2801 /// assembly value into the specific constraints and their prefixes, 2802 /// and also tie in the associated operand values. 2803 /// If this returns an empty vector, and if the constraint string itself 2804 /// isn't empty, there was an error parsing. 2805 TargetLowering::AsmOperandInfoVector TargetLowering::ParseConstraints( 2806 ImmutableCallSite CS) const { 2807 /// ConstraintOperands - Information about all of the constraints. 2808 AsmOperandInfoVector ConstraintOperands; 2809 const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue()); 2810 unsigned maCount = 0; // Largest number of multiple alternative constraints. 2811 2812 // Do a prepass over the constraints, canonicalizing them, and building up the 2813 // ConstraintOperands list. 2814 InlineAsm::ConstraintInfoVector 2815 ConstraintInfos = IA->ParseConstraints(); 2816 2817 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst. 2818 unsigned ResNo = 0; // ResNo - The result number of the next output. 2819 2820 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) { 2821 ConstraintOperands.push_back(AsmOperandInfo(ConstraintInfos[i])); 2822 AsmOperandInfo &OpInfo = ConstraintOperands.back(); 2823 2824 // Update multiple alternative constraint count. 2825 if (OpInfo.multipleAlternatives.size() > maCount) 2826 maCount = OpInfo.multipleAlternatives.size(); 2827 2828 OpInfo.ConstraintVT = MVT::Other; 2829 2830 // Compute the value type for each operand. 2831 switch (OpInfo.Type) { 2832 case InlineAsm::isOutput: 2833 // Indirect outputs just consume an argument. 2834 if (OpInfo.isIndirect) { 2835 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++)); 2836 break; 2837 } 2838 2839 // The return value of the call is this value. As such, there is no 2840 // corresponding argument. 2841 assert(!CS.getType()->isVoidTy() && 2842 "Bad inline asm!"); 2843 if (StructType *STy = dyn_cast<StructType>(CS.getType())) { 2844 OpInfo.ConstraintVT = getValueType(STy->getElementType(ResNo)); 2845 } else { 2846 assert(ResNo == 0 && "Asm only has one result!"); 2847 OpInfo.ConstraintVT = getValueType(CS.getType()); 2848 } 2849 ++ResNo; 2850 break; 2851 case InlineAsm::isInput: 2852 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++)); 2853 break; 2854 case InlineAsm::isClobber: 2855 // Nothing to do. 2856 break; 2857 } 2858 2859 if (OpInfo.CallOperandVal) { 2860 llvm::Type *OpTy = OpInfo.CallOperandVal->getType(); 2861 if (OpInfo.isIndirect) { 2862 llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy); 2863 if (!PtrTy) 2864 report_fatal_error("Indirect operand for inline asm not a pointer!"); 2865 OpTy = PtrTy->getElementType(); 2866 } 2867 2868 // Look for vector wrapped in a struct. e.g. { <16 x i8> }. 2869 if (StructType *STy = dyn_cast<StructType>(OpTy)) 2870 if (STy->getNumElements() == 1) 2871 OpTy = STy->getElementType(0); 2872 2873 // If OpTy is not a single value, it may be a struct/union that we 2874 // can tile with integers. 2875 if (!OpTy->isSingleValueType() && OpTy->isSized()) { 2876 unsigned BitSize = TD->getTypeSizeInBits(OpTy); 2877 switch (BitSize) { 2878 default: break; 2879 case 1: 2880 case 8: 2881 case 16: 2882 case 32: 2883 case 64: 2884 case 128: 2885 OpInfo.ConstraintVT = 2886 EVT::getEVT(IntegerType::get(OpTy->getContext(), BitSize), true); 2887 break; 2888 } 2889 } else if (dyn_cast<PointerType>(OpTy)) { 2890 OpInfo.ConstraintVT = MVT::getIntegerVT(8*TD->getPointerSize()); 2891 } else { 2892 OpInfo.ConstraintVT = EVT::getEVT(OpTy, true); 2893 } 2894 } 2895 } 2896 2897 // If we have multiple alternative constraints, select the best alternative. 2898 if (ConstraintInfos.size()) { 2899 if (maCount) { 2900 unsigned bestMAIndex = 0; 2901 int bestWeight = -1; 2902 // weight: -1 = invalid match, and 0 = so-so match to 5 = good match. 2903 int weight = -1; 2904 unsigned maIndex; 2905 // Compute the sums of the weights for each alternative, keeping track 2906 // of the best (highest weight) one so far. 2907 for (maIndex = 0; maIndex < maCount; ++maIndex) { 2908 int weightSum = 0; 2909 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 2910 cIndex != eIndex; ++cIndex) { 2911 AsmOperandInfo& OpInfo = ConstraintOperands[cIndex]; 2912 if (OpInfo.Type == InlineAsm::isClobber) 2913 continue; 2914 2915 // If this is an output operand with a matching input operand, 2916 // look up the matching input. If their types mismatch, e.g. one 2917 // is an integer, the other is floating point, or their sizes are 2918 // different, flag it as an maCantMatch. 2919 if (OpInfo.hasMatchingInput()) { 2920 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 2921 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 2922 if ((OpInfo.ConstraintVT.isInteger() != 2923 Input.ConstraintVT.isInteger()) || 2924 (OpInfo.ConstraintVT.getSizeInBits() != 2925 Input.ConstraintVT.getSizeInBits())) { 2926 weightSum = -1; // Can't match. 2927 break; 2928 } 2929 } 2930 } 2931 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex); 2932 if (weight == -1) { 2933 weightSum = -1; 2934 break; 2935 } 2936 weightSum += weight; 2937 } 2938 // Update best. 2939 if (weightSum > bestWeight) { 2940 bestWeight = weightSum; 2941 bestMAIndex = maIndex; 2942 } 2943 } 2944 2945 // Now select chosen alternative in each constraint. 2946 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 2947 cIndex != eIndex; ++cIndex) { 2948 AsmOperandInfo& cInfo = ConstraintOperands[cIndex]; 2949 if (cInfo.Type == InlineAsm::isClobber) 2950 continue; 2951 cInfo.selectAlternative(bestMAIndex); 2952 } 2953 } 2954 } 2955 2956 // Check and hook up tied operands, choose constraint code to use. 2957 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 2958 cIndex != eIndex; ++cIndex) { 2959 AsmOperandInfo& OpInfo = ConstraintOperands[cIndex]; 2960 2961 // If this is an output operand with a matching input operand, look up the 2962 // matching input. If their types mismatch, e.g. one is an integer, the 2963 // other is floating point, or their sizes are different, flag it as an 2964 // error. 2965 if (OpInfo.hasMatchingInput()) { 2966 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 2967 2968 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 2969 std::pair<unsigned, const TargetRegisterClass*> MatchRC = 2970 getRegForInlineAsmConstraint(OpInfo.ConstraintCode, OpInfo.ConstraintVT); 2971 std::pair<unsigned, const TargetRegisterClass*> InputRC = 2972 getRegForInlineAsmConstraint(Input.ConstraintCode, Input.ConstraintVT); 2973 if ((OpInfo.ConstraintVT.isInteger() != 2974 Input.ConstraintVT.isInteger()) || 2975 (MatchRC.second != InputRC.second)) { 2976 report_fatal_error("Unsupported asm: input constraint" 2977 " with a matching output constraint of" 2978 " incompatible type!"); 2979 } 2980 } 2981 2982 } 2983 } 2984 2985 return ConstraintOperands; 2986 } 2987 2988 2989 /// getConstraintGenerality - Return an integer indicating how general CT 2990 /// is. 2991 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) { 2992 switch (CT) { 2993 default: llvm_unreachable("Unknown constraint type!"); 2994 case TargetLowering::C_Other: 2995 case TargetLowering::C_Unknown: 2996 return 0; 2997 case TargetLowering::C_Register: 2998 return 1; 2999 case TargetLowering::C_RegisterClass: 3000 return 2; 3001 case TargetLowering::C_Memory: 3002 return 3; 3003 } 3004 } 3005 3006 /// Examine constraint type and operand type and determine a weight value. 3007 /// This object must already have been set up with the operand type 3008 /// and the current alternative constraint selected. 3009 TargetLowering::ConstraintWeight 3010 TargetLowering::getMultipleConstraintMatchWeight( 3011 AsmOperandInfo &info, int maIndex) const { 3012 InlineAsm::ConstraintCodeVector *rCodes; 3013 if (maIndex >= (int)info.multipleAlternatives.size()) 3014 rCodes = &info.Codes; 3015 else 3016 rCodes = &info.multipleAlternatives[maIndex].Codes; 3017 ConstraintWeight BestWeight = CW_Invalid; 3018 3019 // Loop over the options, keeping track of the most general one. 3020 for (unsigned i = 0, e = rCodes->size(); i != e; ++i) { 3021 ConstraintWeight weight = 3022 getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str()); 3023 if (weight > BestWeight) 3024 BestWeight = weight; 3025 } 3026 3027 return BestWeight; 3028 } 3029 3030 /// Examine constraint type and operand type and determine a weight value. 3031 /// This object must already have been set up with the operand type 3032 /// and the current alternative constraint selected. 3033 TargetLowering::ConstraintWeight 3034 TargetLowering::getSingleConstraintMatchWeight( 3035 AsmOperandInfo &info, const char *constraint) const { 3036 ConstraintWeight weight = CW_Invalid; 3037 Value *CallOperandVal = info.CallOperandVal; 3038 // If we don't have a value, we can't do a match, 3039 // but allow it at the lowest weight. 3040 if (CallOperandVal == NULL) 3041 return CW_Default; 3042 // Look at the constraint type. 3043 switch (*constraint) { 3044 case 'i': // immediate integer. 3045 case 'n': // immediate integer with a known value. 3046 if (isa<ConstantInt>(CallOperandVal)) 3047 weight = CW_Constant; 3048 break; 3049 case 's': // non-explicit intregal immediate. 3050 if (isa<GlobalValue>(CallOperandVal)) 3051 weight = CW_Constant; 3052 break; 3053 case 'E': // immediate float if host format. 3054 case 'F': // immediate float. 3055 if (isa<ConstantFP>(CallOperandVal)) 3056 weight = CW_Constant; 3057 break; 3058 case '<': // memory operand with autodecrement. 3059 case '>': // memory operand with autoincrement. 3060 case 'm': // memory operand. 3061 case 'o': // offsettable memory operand 3062 case 'V': // non-offsettable memory operand 3063 weight = CW_Memory; 3064 break; 3065 case 'r': // general register. 3066 case 'g': // general register, memory operand or immediate integer. 3067 // note: Clang converts "g" to "imr". 3068 if (CallOperandVal->getType()->isIntegerTy()) 3069 weight = CW_Register; 3070 break; 3071 case 'X': // any operand. 3072 default: 3073 weight = CW_Default; 3074 break; 3075 } 3076 return weight; 3077 } 3078 3079 /// ChooseConstraint - If there are multiple different constraints that we 3080 /// could pick for this operand (e.g. "imr") try to pick the 'best' one. 3081 /// This is somewhat tricky: constraints fall into four classes: 3082 /// Other -> immediates and magic values 3083 /// Register -> one specific register 3084 /// RegisterClass -> a group of regs 3085 /// Memory -> memory 3086 /// Ideally, we would pick the most specific constraint possible: if we have 3087 /// something that fits into a register, we would pick it. The problem here 3088 /// is that if we have something that could either be in a register or in 3089 /// memory that use of the register could cause selection of *other* 3090 /// operands to fail: they might only succeed if we pick memory. Because of 3091 /// this the heuristic we use is: 3092 /// 3093 /// 1) If there is an 'other' constraint, and if the operand is valid for 3094 /// that constraint, use it. This makes us take advantage of 'i' 3095 /// constraints when available. 3096 /// 2) Otherwise, pick the most general constraint present. This prefers 3097 /// 'm' over 'r', for example. 3098 /// 3099 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo, 3100 const TargetLowering &TLI, 3101 SDValue Op, SelectionDAG *DAG) { 3102 assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options"); 3103 unsigned BestIdx = 0; 3104 TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown; 3105 int BestGenerality = -1; 3106 3107 // Loop over the options, keeping track of the most general one. 3108 for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) { 3109 TargetLowering::ConstraintType CType = 3110 TLI.getConstraintType(OpInfo.Codes[i]); 3111 3112 // If this is an 'other' constraint, see if the operand is valid for it. 3113 // For example, on X86 we might have an 'rI' constraint. If the operand 3114 // is an integer in the range [0..31] we want to use I (saving a load 3115 // of a register), otherwise we must use 'r'. 3116 if (CType == TargetLowering::C_Other && Op.getNode()) { 3117 assert(OpInfo.Codes[i].size() == 1 && 3118 "Unhandled multi-letter 'other' constraint"); 3119 std::vector<SDValue> ResultOps; 3120 TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i], 3121 ResultOps, *DAG); 3122 if (!ResultOps.empty()) { 3123 BestType = CType; 3124 BestIdx = i; 3125 break; 3126 } 3127 } 3128 3129 // Things with matching constraints can only be registers, per gcc 3130 // documentation. This mainly affects "g" constraints. 3131 if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput()) 3132 continue; 3133 3134 // This constraint letter is more general than the previous one, use it. 3135 int Generality = getConstraintGenerality(CType); 3136 if (Generality > BestGenerality) { 3137 BestType = CType; 3138 BestIdx = i; 3139 BestGenerality = Generality; 3140 } 3141 } 3142 3143 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx]; 3144 OpInfo.ConstraintType = BestType; 3145 } 3146 3147 /// ComputeConstraintToUse - Determines the constraint code and constraint 3148 /// type to use for the specific AsmOperandInfo, setting 3149 /// OpInfo.ConstraintCode and OpInfo.ConstraintType. 3150 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo, 3151 SDValue Op, 3152 SelectionDAG *DAG) const { 3153 assert(!OpInfo.Codes.empty() && "Must have at least one constraint"); 3154 3155 // Single-letter constraints ('r') are very common. 3156 if (OpInfo.Codes.size() == 1) { 3157 OpInfo.ConstraintCode = OpInfo.Codes[0]; 3158 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 3159 } else { 3160 ChooseConstraint(OpInfo, *this, Op, DAG); 3161 } 3162 3163 // 'X' matches anything. 3164 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) { 3165 // Labels and constants are handled elsewhere ('X' is the only thing 3166 // that matches labels). For Functions, the type here is the type of 3167 // the result, which is not what we want to look at; leave them alone. 3168 Value *v = OpInfo.CallOperandVal; 3169 if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) { 3170 OpInfo.CallOperandVal = v; 3171 return; 3172 } 3173 3174 // Otherwise, try to resolve it to something we know about by looking at 3175 // the actual operand type. 3176 if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) { 3177 OpInfo.ConstraintCode = Repl; 3178 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 3179 } 3180 } 3181 } 3182 3183 //===----------------------------------------------------------------------===// 3184 // Loop Strength Reduction hooks 3185 //===----------------------------------------------------------------------===// 3186 3187 /// isLegalAddressingMode - Return true if the addressing mode represented 3188 /// by AM is legal for this target, for a load/store of the specified type. 3189 bool TargetLowering::isLegalAddressingMode(const AddrMode &AM, 3190 Type *Ty) const { 3191 // The default implementation of this implements a conservative RISCy, r+r and 3192 // r+i addr mode. 3193 3194 // Allows a sign-extended 16-bit immediate field. 3195 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1) 3196 return false; 3197 3198 // No global is ever allowed as a base. 3199 if (AM.BaseGV) 3200 return false; 3201 3202 // Only support r+r, 3203 switch (AM.Scale) { 3204 case 0: // "r+i" or just "i", depending on HasBaseReg. 3205 break; 3206 case 1: 3207 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed. 3208 return false; 3209 // Otherwise we have r+r or r+i. 3210 break; 3211 case 2: 3212 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed. 3213 return false; 3214 // Allow 2*r as r+r. 3215 break; 3216 } 3217 3218 return true; 3219 } 3220 3221 /// BuildExactDiv - Given an exact SDIV by a constant, create a multiplication 3222 /// with the multiplicative inverse of the constant. 3223 SDValue TargetLowering::BuildExactSDIV(SDValue Op1, SDValue Op2, DebugLoc dl, 3224 SelectionDAG &DAG) const { 3225 ConstantSDNode *C = cast<ConstantSDNode>(Op2); 3226 APInt d = C->getAPIntValue(); 3227 assert(d != 0 && "Division by zero!"); 3228 3229 // Shift the value upfront if it is even, so the LSB is one. 3230 unsigned ShAmt = d.countTrailingZeros(); 3231 if (ShAmt) { 3232 // TODO: For UDIV use SRL instead of SRA. 3233 SDValue Amt = DAG.getConstant(ShAmt, getShiftAmountTy(Op1.getValueType())); 3234 Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt); 3235 d = d.ashr(ShAmt); 3236 } 3237 3238 // Calculate the multiplicative inverse, using Newton's method. 3239 APInt t, xn = d; 3240 while ((t = d*xn) != 1) 3241 xn *= APInt(d.getBitWidth(), 2) - t; 3242 3243 Op2 = DAG.getConstant(xn, Op1.getValueType()); 3244 return DAG.getNode(ISD::MUL, dl, Op1.getValueType(), Op1, Op2); 3245 } 3246 3247 /// BuildSDIVSequence - Given an ISD::SDIV node expressing a divide by constant, 3248 /// return a DAG expression to select that will generate the same value by 3249 /// multiplying by a magic number. See: 3250 /// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html> 3251 SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, 3252 std::vector<SDNode*>* Created) const { 3253 EVT VT = N->getValueType(0); 3254 DebugLoc dl= N->getDebugLoc(); 3255 3256 // Check to see if we can do this. 3257 // FIXME: We should be more aggressive here. 3258 if (!isTypeLegal(VT)) 3259 return SDValue(); 3260 3261 APInt d = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue(); 3262 APInt::ms magics = d.magic(); 3263 3264 // Multiply the numerator (operand 0) by the magic value 3265 // FIXME: We should support doing a MUL in a wider type 3266 SDValue Q; 3267 if (isOperationLegalOrCustom(ISD::MULHS, VT)) 3268 Q = DAG.getNode(ISD::MULHS, dl, VT, N->getOperand(0), 3269 DAG.getConstant(magics.m, VT)); 3270 else if (isOperationLegalOrCustom(ISD::SMUL_LOHI, VT)) 3271 Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), 3272 N->getOperand(0), 3273 DAG.getConstant(magics.m, VT)).getNode(), 1); 3274 else 3275 return SDValue(); // No mulhs or equvialent 3276 // If d > 0 and m < 0, add the numerator 3277 if (d.isStrictlyPositive() && magics.m.isNegative()) { 3278 Q = DAG.getNode(ISD::ADD, dl, VT, Q, N->getOperand(0)); 3279 if (Created) 3280 Created->push_back(Q.getNode()); 3281 } 3282 // If d < 0 and m > 0, subtract the numerator. 3283 if (d.isNegative() && magics.m.isStrictlyPositive()) { 3284 Q = DAG.getNode(ISD::SUB, dl, VT, Q, N->getOperand(0)); 3285 if (Created) 3286 Created->push_back(Q.getNode()); 3287 } 3288 // Shift right algebraic if shift value is nonzero 3289 if (magics.s > 0) { 3290 Q = DAG.getNode(ISD::SRA, dl, VT, Q, 3291 DAG.getConstant(magics.s, getShiftAmountTy(Q.getValueType()))); 3292 if (Created) 3293 Created->push_back(Q.getNode()); 3294 } 3295 // Extract the sign bit and add it to the quotient 3296 SDValue T = 3297 DAG.getNode(ISD::SRL, dl, VT, Q, DAG.getConstant(VT.getSizeInBits()-1, 3298 getShiftAmountTy(Q.getValueType()))); 3299 if (Created) 3300 Created->push_back(T.getNode()); 3301 return DAG.getNode(ISD::ADD, dl, VT, Q, T); 3302 } 3303 3304 /// BuildUDIVSequence - Given an ISD::UDIV node expressing a divide by constant, 3305 /// return a DAG expression to select that will generate the same value by 3306 /// multiplying by a magic number. See: 3307 /// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html> 3308 SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG, 3309 std::vector<SDNode*>* Created) const { 3310 EVT VT = N->getValueType(0); 3311 DebugLoc dl = N->getDebugLoc(); 3312 3313 // Check to see if we can do this. 3314 // FIXME: We should be more aggressive here. 3315 if (!isTypeLegal(VT)) 3316 return SDValue(); 3317 3318 // FIXME: We should use a narrower constant when the upper 3319 // bits are known to be zero. 3320 const APInt &N1C = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue(); 3321 APInt::mu magics = N1C.magicu(); 3322 3323 SDValue Q = N->getOperand(0); 3324 3325 // If the divisor is even, we can avoid using the expensive fixup by shifting 3326 // the divided value upfront. 3327 if (magics.a != 0 && !N1C[0]) { 3328 unsigned Shift = N1C.countTrailingZeros(); 3329 Q = DAG.getNode(ISD::SRL, dl, VT, Q, 3330 DAG.getConstant(Shift, getShiftAmountTy(Q.getValueType()))); 3331 if (Created) 3332 Created->push_back(Q.getNode()); 3333 3334 // Get magic number for the shifted divisor. 3335 magics = N1C.lshr(Shift).magicu(Shift); 3336 assert(magics.a == 0 && "Should use cheap fixup now"); 3337 } 3338 3339 // Multiply the numerator (operand 0) by the magic value 3340 // FIXME: We should support doing a MUL in a wider type 3341 if (isOperationLegalOrCustom(ISD::MULHU, VT)) 3342 Q = DAG.getNode(ISD::MULHU, dl, VT, Q, DAG.getConstant(magics.m, VT)); 3343 else if (isOperationLegalOrCustom(ISD::UMUL_LOHI, VT)) 3344 Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), Q, 3345 DAG.getConstant(magics.m, VT)).getNode(), 1); 3346 else 3347 return SDValue(); // No mulhu or equvialent 3348 if (Created) 3349 Created->push_back(Q.getNode()); 3350 3351 if (magics.a == 0) { 3352 assert(magics.s < N1C.getBitWidth() && 3353 "We shouldn't generate an undefined shift!"); 3354 return DAG.getNode(ISD::SRL, dl, VT, Q, 3355 DAG.getConstant(magics.s, getShiftAmountTy(Q.getValueType()))); 3356 } else { 3357 SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N->getOperand(0), Q); 3358 if (Created) 3359 Created->push_back(NPQ.getNode()); 3360 NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ, 3361 DAG.getConstant(1, getShiftAmountTy(NPQ.getValueType()))); 3362 if (Created) 3363 Created->push_back(NPQ.getNode()); 3364 NPQ = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q); 3365 if (Created) 3366 Created->push_back(NPQ.getNode()); 3367 return DAG.getNode(ISD::SRL, dl, VT, NPQ, 3368 DAG.getConstant(magics.s-1, getShiftAmountTy(NPQ.getValueType()))); 3369 } 3370 } 3371