//===- subzero/src/IceTargetLoweringX8664Traits.h - x86-64 traits -*- C++ -*-=//
//
// The Subzero Code Generator
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief Declares the X8664 Target Lowering Traits.
///
//===----------------------------------------------------------------------===//

#ifndef SUBZERO_SRC_ICETARGETLOWERINGX8664TRAITS_H
#define SUBZERO_SRC_ICETARGETLOWERINGX8664TRAITS_H

#include "IceAssembler.h"
#include "IceConditionCodesX8664.h"
#include "IceDefs.h"
#include "IceInst.h"
#include "IceInstX8664.def"
#include "IceOperand.h"
#include "IceRegistersX8664.h"
#include "IceTargetLowering.h"
#include "IceTargetLoweringX8664.def"
#include "IceTargetLoweringX86RegClass.h"

#include <array>
#include <initializer_list>

namespace Ice {

namespace X8664 {
using namespace ::Ice::X86;

template <class TraitsType> class AssemblerX86Base;
template <class TraitsType> struct Insts;
template <class TraitsType> class TargetX86Base;

class TargetX8664;

struct TargetX8664Traits {
  //----------------------------------------------------------------------------
  //     ______  ______  __    __
  //    /\  __ \/\  ___\/\ "-./  \
  //    \ \  __ \ \___  \ \ \-./\ \
  //     \ \_\ \_\/\_____\ \_\ \ \_\
  //      \/_/\/_/\/_____/\/_/  \/_/
  //
  //----------------------------------------------------------------------------
  static constexpr ::Ice::Assembler::AssemblerKind AsmKind =
      ::Ice::Assembler::Asm_X8664;

  static constexpr bool Is64Bit = true;
  static constexpr bool HasPopa = false;
  static constexpr bool HasPusha = false;
  static constexpr bool UsesX87 = false;
  static constexpr ::Ice::RegX8664::GPRRegister Last8BitGPR =
      ::Ice::RegX8664::GPRRegister::Encoded_Reg_r15d;

  enum ScaleFactor { TIMES_1 = 0, TIMES_2 = 1, TIMES_4 = 2, TIMES_8 = 3 };

  using GPRRegister = ::Ice::RegX8664::GPRRegister;
  using ByteRegister = ::Ice::RegX8664::ByteRegister;
  using XmmRegister = ::Ice::RegX8664::XmmRegister;

  using Cond = ::Ice::CondX8664;

  using RegisterSet = ::Ice::RegX8664;
  static constexpr RegisterSet::AllRegisters StackPtr = RegX8664::Reg_rsp;
  static constexpr RegisterSet::AllRegisters FramePtr = RegX8664::Reg_rbp;
  static constexpr GPRRegister Encoded_Reg_Accumulator =
      RegX8664::Encoded_Reg_eax;
  static constexpr GPRRegister Encoded_Reg_Counter = RegX8664::Encoded_Reg_ecx;
  static constexpr FixupKind FK_PcRel = llvm::ELF::R_X86_64_PC32;
  static constexpr FixupKind FK_Abs = llvm::ELF::R_X86_64_32S;
  static constexpr FixupKind FK_Gotoff = llvm::ELF::R_X86_64_GOTOFF64;
  static constexpr FixupKind FK_GotPC = llvm::ELF::R_X86_64_GOTPC32;

  class Operand {
  public:
    enum RexBits {
      RexNone = 0x00,
      RexBase = 0x40,
      RexW = RexBase | (1 << 3),
      RexR = RexBase | (1 << 2),
      RexX = RexBase | (1 << 1),
      RexB = RexBase | (1 << 0),
    };

  protected:
    // Needed by subclass Address.
    Operand() = default;

  public:
    Operand(const Operand &) = default;
    Operand(Operand &&) = default;
    Operand &operator=(const Operand &) = default;
    Operand &operator=(Operand &&) = default;

    uint8_t mod() const { return (encoding_at(0) >> 6) & 3; }

    uint8_t rexX() const { return (rex_ & RexX) != RexX ? RexNone : RexX; }
    uint8_t rexB() const { return (rex_ & RexB) != RexB ? RexNone : RexB; }

    GPRRegister rm() const {
      return static_cast<GPRRegister>((rexB() != 0 ? 0x08 : 0) |
                                      (encoding_at(0) & 7));
    }

    ScaleFactor scale() const {
      return static_cast<ScaleFactor>((encoding_at(1) >> 6) & 3);
    }

    GPRRegister index() const {
      return static_cast<GPRRegister>((rexX() != 0 ? 0x08 : 0) |
                                      ((encoding_at(1) >> 3) & 7));
    }

    GPRRegister base() const {
      return static_cast<GPRRegister>((rexB() != 0 ? 0x08 : 0) |
                                      (encoding_at(1) & 7));
    }

    int8_t disp8() const {
      assert(length_ >= 2);
      return static_cast<int8_t>(encoding_[length_ - 1]);
    }

    AssemblerFixup *fixup() const { return fixup_; }

  protected:
    void SetModRM(int mod, GPRRegister rm) {
      assert((mod & ~3) == 0);
      encoding_[0] = (mod << 6) | (rm & 0x07);
      rex_ = (rm & 0x08) ? RexB : RexNone;
      length_ = 1;
    }

    void SetSIB(ScaleFactor scale, GPRRegister index, GPRRegister base) {
      assert(length_ == 1);
      assert((scale & ~3) == 0);
      encoding_[1] = (scale << 6) | ((index & 0x07) << 3) | (base & 0x07);
      rex_ =
          ((base & 0x08) ? RexB : RexNone) | ((index & 0x08) ? RexX : RexNone);
      length_ = 2;
    }

    void SetDisp8(int8_t disp) {
      assert(length_ == 1 || length_ == 2);
      encoding_[length_++] = static_cast<uint8_t>(disp);
    }

    void SetDisp32(int32_t disp) {
      assert(length_ == 1 || length_ == 2);
      intptr_t disp_size = sizeof(disp);
      memmove(&encoding_[length_], &disp, disp_size);
      length_ += disp_size;
    }

    void SetFixup(AssemblerFixup *fixup) { fixup_ = fixup; }

  private:
    AssemblerFixup *fixup_ = nullptr;
    uint8_t rex_ = 0;
    uint8_t encoding_[6];
    uint8_t length_ = 0;

    explicit Operand(GPRRegister reg) : fixup_(nullptr) { SetModRM(3, reg); }

    /// Get the operand encoding byte at the given index.
    uint8_t encoding_at(intptr_t index) const {
      assert(index >= 0 && index < length_);
      return encoding_[index];
    }

    /// Returns whether or not this operand is really the given register in
    /// disguise. Used from the assembler to generate better encodings.
    bool IsRegister(GPRRegister reg) const {
      return ((encoding_[0] & 0xF8) == 0xC0) // Addressing mode is register only.
             && (rm() == reg);               // Register codes match.
    }

    friend class AssemblerX86Base<TargetX8664Traits>;
  };
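
  // Illustrative sketch (not part of the original interface): how the ModRM /
  // REX bookkeeping above fits together for an extended register. When the
  // assembler (a friend of this class) encodes r15d -- whose GPRRegister value
  // is 15 (0b1111) -- via SetModRM(3, GPRRegister::Encoded_Reg_r15d), it gets
  //
  //   encoding_[0] == 0xC7   (mod = 0b11, low three register bits = 0b111)
  //   rex_         == RexB   (bit 3 of the register number)
  //
  // and rm() reconstructs the full value by OR-ing 0x08 back in whenever
  // rexB() is set. The REX.X bit plays the same role for the SIB index.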

  class Address : public Operand {
    Address() = default;

  public:
    Address(const Address &) = default;
    Address(Address &&) = default;
    Address &operator=(const Address &) = default;
    Address &operator=(Address &&) = default;

    Address(GPRRegister Base, int32_t Disp, AssemblerFixup *Fixup) {
      if (Fixup == nullptr && Disp == 0 &&
          (Base & 7) != RegX8664::Encoded_Reg_rbp) {
        SetModRM(0, Base);
        if ((Base & 7) == RegX8664::Encoded_Reg_rsp)
          SetSIB(TIMES_1, RegX8664::Encoded_Reg_rsp, Base);
      } else if (Fixup == nullptr && Utils::IsInt(8, Disp)) {
        SetModRM(1, Base);
        if ((Base & 7) == RegX8664::Encoded_Reg_rsp)
          SetSIB(TIMES_1, RegX8664::Encoded_Reg_rsp, Base);
        SetDisp8(Disp);
      } else {
        SetModRM(2, Base);
        if ((Base & 7) == RegX8664::Encoded_Reg_rsp)
          SetSIB(TIMES_1, RegX8664::Encoded_Reg_rsp, Base);
        SetDisp32(Disp);
        if (Fixup)
          SetFixup(Fixup);
      }
    }

    Address(GPRRegister Index, ScaleFactor Scale, int32_t Disp,
            AssemblerFixup *Fixup) {
      assert(Index != RegX8664::Encoded_Reg_rsp); // Illegal addressing mode.
      SetModRM(0, RegX8664::Encoded_Reg_rsp);
      SetSIB(Scale, Index, RegX8664::Encoded_Reg_rbp);
      SetDisp32(Disp);
      if (Fixup)
        SetFixup(Fixup);
    }

    Address(GPRRegister Base, GPRRegister Index, ScaleFactor Scale,
            int32_t Disp, AssemblerFixup *Fixup) {
      assert(Index != RegX8664::Encoded_Reg_rsp); // Illegal addressing mode.
      if (Fixup == nullptr && Disp == 0 &&
          (Base & 7) != RegX8664::Encoded_Reg_rbp) {
        SetModRM(0, RegX8664::Encoded_Reg_rsp);
        SetSIB(Scale, Index, Base);
      } else if (Fixup == nullptr && Utils::IsInt(8, Disp)) {
        SetModRM(1, RegX8664::Encoded_Reg_rsp);
        SetSIB(Scale, Index, Base);
        SetDisp8(Disp);
      } else {
        SetModRM(2, RegX8664::Encoded_Reg_rsp);
        SetSIB(Scale, Index, Base);
        SetDisp32(Disp);
        if (Fixup)
          SetFixup(Fixup);
      }
    }

    /// Generate a RIP-relative address expression on x86-64.
    static Address RipRelative(RelocOffsetT Offset, AssemblerFixup *Fixup) {
      assert(Fixup != nullptr);
      assert(Fixup->kind() == FK_PcRel);
      Address NewAddress;
      NewAddress.SetModRM(0x0, RegX8664::Encoded_Reg_rbp);

      // Use the Offset in the displacement for now. If we decide to process
      // fixups later, we'll need to patch up the emitted displacement.
      NewAddress.SetDisp32(Offset);
      if (Fixup)
        NewAddress.SetFixup(Fixup);

      return NewAddress;
    }

    /// Generate an absolute address.
    static Address Absolute(RelocOffsetT Addr) {
      Address NewAddress;
      NewAddress.SetModRM(0x0, RegX8664::Encoded_Reg_rsp);
      static constexpr ScaleFactor NoScale = TIMES_1;
      NewAddress.SetSIB(NoScale, RegX8664::Encoded_Reg_rsp,
                        RegX8664::Encoded_Reg_rbp);
      NewAddress.SetDisp32(Addr);
      return NewAddress;
    }

    static Address ofConstPool(Assembler *Asm, const Constant *Imm) {
      // TODO(jpp): ???
      AssemblerFixup *Fixup = Asm->createFixup(FK_Abs, Imm);
      const RelocOffsetT Offset = 4;
      return Address::RipRelative(Offset, Fixup);
    }
  };
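
  // Illustrative sketch (not part of the original interface) of which encoding
  // the Address constructors above select; the register names are examples:
  //
  //   Address(Encoded_Reg_eax, 0, nullptr)  -> mod=00             ; [rax]
  //   Address(Encoded_Reg_eax, 16, nullptr) -> mod=01, disp8=16   ; [rax + 16]
  //   Address(Encoded_Reg_rsp, 0, nullptr)  -> mod=00 + SIB byte  ; [rsp]
  //   Address(Encoded_Reg_rbp, 0, nullptr)  -> mod=01, disp8=0    ; [rbp + 0]
  //
  // rsp-based forms always carry a SIB byte and rbp-based forms always carry a
  // displacement because, in x86, those rm/base encodings are reserved for SIB
  // and for RIP-relative/disp32 addressing respectively.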

  //----------------------------------------------------------------------------
  //     __      ______  __     __  ______  ______  __  __   __  ______
  //    /\ \    /\  __ \/\ \  _ \ \/\  ___\/\  == \/\ \/\ "-.\ \/\  ___\
  //    \ \ \___\ \ \/\ \ \ \/ ".\ \ \  __\\ \  __<\ \ \ \ \-.  \ \ \__ \
  //     \ \_____\ \_____\ \__/".~\_\ \_____\ \_\ \_\ \_\ \_\\"\_\ \_____\
  //      \/_____/\/_____/\/_/   \/_/\/_____/\/_/ /_/\/_/\/_/ \/_/\/_____/
  //
  //----------------------------------------------------------------------------
  enum InstructionSet {
    Begin,
    // SSE2 is the PNaCl baseline instruction set.
    SSE2 = Begin,
    SSE4_1,
    End
  };

  static const char *TargetName;
  static constexpr Type WordType = IceType_i64;

  static const char *getRegName(RegNumT RegNum) {
    static const char *const RegNames[RegisterSet::Reg_NUM] = {
#define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \
          sboxres, isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, \
          is16To8, isTrunc8Rcvr, isAhRcvr, aliases) \
  name,
        REGX8664_TABLE
#undef X
    };
    RegNum.assertIsValid();
    return RegNames[RegNum];
  }

  static GPRRegister getEncodedGPR(RegNumT RegNum) {
    static const GPRRegister GPRRegs[RegisterSet::Reg_NUM] = {
#define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \
          sboxres, isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, \
          is16To8, isTrunc8Rcvr, isAhRcvr, aliases) \
  GPRRegister(isGPR ? encode : GPRRegister::Encoded_Not_GPR),
        REGX8664_TABLE
#undef X
    };
    RegNum.assertIsValid();
    assert(GPRRegs[RegNum] != GPRRegister::Encoded_Not_GPR);
    return GPRRegs[RegNum];
  }

  static ByteRegister getEncodedByteReg(RegNumT RegNum) {
    static const ByteRegister ByteRegs[RegisterSet::Reg_NUM] = {
#define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \
          sboxres, isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, \
          is16To8, isTrunc8Rcvr, isAhRcvr, aliases) \
  ByteRegister(is8 ? encode : ByteRegister::Encoded_Not_ByteReg),
        REGX8664_TABLE
#undef X
    };
    RegNum.assertIsValid();
    assert(ByteRegs[RegNum] != ByteRegister::Encoded_Not_ByteReg);
    return ByteRegs[RegNum];
  }

  static XmmRegister getEncodedXmm(RegNumT RegNum) {
    static const XmmRegister XmmRegs[RegisterSet::Reg_NUM] = {
#define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \
          sboxres, isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, \
          is16To8, isTrunc8Rcvr, isAhRcvr, aliases) \
  XmmRegister(isXmm ? encode : XmmRegister::Encoded_Not_Xmm),
        REGX8664_TABLE
#undef X
    };
    RegNum.assertIsValid();
    assert(XmmRegs[RegNum] != XmmRegister::Encoded_Not_Xmm);
    return XmmRegs[RegNum];
  }

  static uint32_t getEncoding(RegNumT RegNum) {
    static const uint32_t Encoding[RegisterSet::Reg_NUM] = {
#define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \
          sboxres, isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, \
          is16To8, isTrunc8Rcvr, isAhRcvr, aliases) \
  encode,
        REGX8664_TABLE
#undef X
    };
    RegNum.assertIsValid();
    return Encoding[RegNum];
  }

  static inline RegNumT getBaseReg(RegNumT RegNum) {
    static const RegNumT BaseRegs[RegisterSet::Reg_NUM] = {
#define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \
          sboxres, isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, \
          is16To8, isTrunc8Rcvr, isAhRcvr, aliases) \
  RegisterSet::base,
        REGX8664_TABLE
#undef X
    };
    RegNum.assertIsValid();
    return BaseRegs[RegNum];
  }

private:
  static RegNumT getFirstGprForType(Type Ty) {
    switch (Ty) {
    default:
      llvm_unreachable("Invalid type for GPR.");
    case IceType_i1:
    case IceType_i8:
      return RegisterSet::Reg_al;
    case IceType_i16:
      return RegisterSet::Reg_ax;
    case IceType_i32:
      return RegisterSet::Reg_eax;
    case IceType_i64:
      return RegisterSet::Reg_rax;
    }
  }

public:
  static RegNumT getGprForType(Type Ty, RegNumT RegNum) {
    assert(RegNum.hasValue());

    if (!isScalarIntegerType(Ty)) {
      return RegNum;
    }

    assert(Ty == IceType_i1 || Ty == IceType_i8 || Ty == IceType_i16 ||
           Ty == IceType_i32 || Ty == IceType_i64);

    if (RegNum == RegisterSet::Reg_ah) {
      assert(Ty == IceType_i8);
      return RegNum;
    }

    assert(RegNum != RegisterSet::Reg_bh);
    assert(RegNum != RegisterSet::Reg_ch);
    assert(RegNum != RegisterSet::Reg_dh);

    const RegNumT FirstGprForType = getFirstGprForType(Ty);

    switch (RegNum) {
    default:
      llvm::report_fatal_error("Unknown register.");
#define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \
          sboxres, isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, \
          is16To8, isTrunc8Rcvr, isAhRcvr, aliases) \
  case RegisterSet::val: { \
    if (!isGPR) \
      return RegisterSet::val; \
    assert((is64) || (is32) || (is16) || (is8) || \
           getBaseReg(RegisterSet::val) == RegisterSet::Reg_rsp); \
    constexpr RegisterSet::AllRegisters FirstGprWithRegNumSize = \
        ((is64) || RegisterSet::val == RegisterSet::Reg_rsp) \
            ? RegisterSet::Reg_rax \
            : (((is32) || RegisterSet::val == RegisterSet::Reg_esp) \
                   ? RegisterSet::Reg_eax \
                   : (((is16) || RegisterSet::val == RegisterSet::Reg_sp) \
                          ? RegisterSet::Reg_ax \
                          : RegisterSet::Reg_al)); \
    const auto NewRegNum = \
        RegNumT::fixme(RegNum - FirstGprWithRegNumSize + FirstGprForType); \
    assert(getBaseReg(RegNum) == getBaseReg(NewRegNum) && \
           "Error involving " #val); \
    return NewRegNum; \
  }
      REGX8664_TABLE
#undef X
    }
  }
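
  // Illustrative sketch of getGprForType() behavior (not part of the original
  // interface). It relies on the 8/16/32/64-bit views of each GPR appearing in
  // the same order in the register table, so that, e.g.,
  //
  //   getGprForType(IceType_i64, RegisterSet::Reg_eax) -> RegisterSet::Reg_rax
  //   getGprForType(IceType_i32, RegisterSet::Reg_rcx) -> the 32-bit view of
  //                                                       rcx (ecx)
  //
  // while non-GPR register numbers (e.g., XMM registers) are returned
  // unchanged.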

private:
  /// SizeOf is used to obtain the size of an initializer list as a constexpr
  /// expression. This is only needed until our C++ library is updated to
  /// C++14, which makes the members of std::initializer_list constexpr.
  class SizeOf {
    SizeOf(const SizeOf &) = delete;
    SizeOf &operator=(const SizeOf &) = delete;

  public:
    constexpr SizeOf() : Size(0) {}
    template <typename... T>
    explicit constexpr SizeOf(T...) : Size(length<T...>::value) {}
    constexpr SizeT size() const { return Size; }

  private:
    template <typename T, typename... U> struct length {
      static constexpr std::size_t value = 1 + length<U...>::value;
    };

    template <typename T> struct length<T> {
      static constexpr std::size_t value = 1;
    };

    const std::size_t Size;
  };
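
  // Illustrative sketch (not part of the original interface): SizeOf simply
  // counts its constructor arguments at compile time, which is how the
  // register table below turns a parenthesized alias list from the .def file
  // into an element count via (SizeOf aliases).size(). For example, something
  // like
  //
  //   constexpr SizeOf Two(RegisterSet::Reg_eax, RegisterSet::Reg_rax);
  //   static_assert(Two.size() == 2, "SizeOf counts its arguments");
  //
  // would hold.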

public:
  static void initRegisterSet(
      const ::Ice::ClFlags &Flags,
      std::array<SmallBitVector, RCX86_NUM> *TypeToRegisterSet,
      std::array<SmallBitVector, RegisterSet::Reg_NUM> *RegisterAliases) {
    SmallBitVector IntegerRegistersI64(RegisterSet::Reg_NUM);
    SmallBitVector IntegerRegistersI32(RegisterSet::Reg_NUM);
    SmallBitVector IntegerRegistersI16(RegisterSet::Reg_NUM);
    SmallBitVector IntegerRegistersI8(RegisterSet::Reg_NUM);
    SmallBitVector FloatRegisters(RegisterSet::Reg_NUM);
    SmallBitVector VectorRegisters(RegisterSet::Reg_NUM);
    SmallBitVector Trunc64To8Registers(RegisterSet::Reg_NUM);
    SmallBitVector Trunc32To8Registers(RegisterSet::Reg_NUM);
    SmallBitVector Trunc16To8Registers(RegisterSet::Reg_NUM);
    SmallBitVector Trunc8RcvrRegisters(RegisterSet::Reg_NUM);
    SmallBitVector AhRcvrRegisters(RegisterSet::Reg_NUM);
    SmallBitVector InvalidRegisters(RegisterSet::Reg_NUM);

    static constexpr struct {
      uint16_t Val;
      unsigned IsReservedWhenSandboxing : 1;
      unsigned Is64 : 1;
      unsigned Is32 : 1;
      unsigned Is16 : 1;
      unsigned Is8 : 1;
      unsigned IsXmm : 1;
      unsigned Is64To8 : 1;
      unsigned Is32To8 : 1;
      unsigned Is16To8 : 1;
      unsigned IsTrunc8Rcvr : 1;
      unsigned IsAhRcvr : 1;
#define NUM_ALIASES_BITS 2
      SizeT NumAliases : (NUM_ALIASES_BITS + 1);
      uint16_t Aliases[1 << NUM_ALIASES_BITS];
#undef NUM_ALIASES_BITS
    } X8664RegTable[RegisterSet::Reg_NUM] = {
#define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \
          sboxres, isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, \
          is16To8, isTrunc8Rcvr, isAhRcvr, aliases) \
  { \
      RegisterSet::val, sboxres, is64, is32, is16, is8, isXmm, is64To8, \
      is32To8, is16To8, isTrunc8Rcvr, isAhRcvr, (SizeOf aliases).size(), \
      aliases, \
  },
        REGX8664_TABLE
#undef X
    };

    const bool NeedSandboxing = Flags.getUseSandboxing();
    for (SizeT ii = 0; ii < llvm::array_lengthof(X8664RegTable); ++ii) {
      const auto &Entry = X8664RegTable[ii];
      // Even though the register is disabled for register allocation, it
      // might still be used by the Target Lowering (e.g., base pointer), so
      // the register alias table still needs to be defined.
      (*RegisterAliases)[Entry.Val].resize(RegisterSet::Reg_NUM);
      for (int J = 0; J < Entry.NumAliases; ++J) {
        SizeT Alias = Entry.Aliases[J];
        assert(!(*RegisterAliases)[Entry.Val][Alias] && "Duplicate alias");
        (*RegisterAliases)[Entry.Val].set(Alias);
      }

      (*RegisterAliases)[Entry.Val].set(Entry.Val);
      const bool DisabledRegister =
          NeedSandboxing && Entry.IsReservedWhenSandboxing;
      if (DisabledRegister) {
        continue;
      }
      IntegerRegistersI64[Entry.Val] = Entry.Is64;
      IntegerRegistersI32[Entry.Val] = Entry.Is32;
      IntegerRegistersI16[Entry.Val] = Entry.Is16;
      IntegerRegistersI8[Entry.Val] = Entry.Is8;
      FloatRegisters[Entry.Val] = Entry.IsXmm;
      VectorRegisters[Entry.Val] = Entry.IsXmm;
      Trunc64To8Registers[Entry.Val] = Entry.Is64To8;
      Trunc32To8Registers[Entry.Val] = Entry.Is32To8;
      Trunc16To8Registers[Entry.Val] = Entry.Is16To8;
      Trunc8RcvrRegisters[Entry.Val] = Entry.IsTrunc8Rcvr;
      AhRcvrRegisters[Entry.Val] = Entry.IsAhRcvr;
    }

    (*TypeToRegisterSet)[RC_void] = InvalidRegisters;
    (*TypeToRegisterSet)[RC_i1] = IntegerRegistersI8;
    (*TypeToRegisterSet)[RC_i8] = IntegerRegistersI8;
    (*TypeToRegisterSet)[RC_i16] = IntegerRegistersI16;
    (*TypeToRegisterSet)[RC_i32] = IntegerRegistersI32;
    (*TypeToRegisterSet)[RC_i64] = IntegerRegistersI64;
    (*TypeToRegisterSet)[RC_f32] = FloatRegisters;
    (*TypeToRegisterSet)[RC_f64] = FloatRegisters;
    (*TypeToRegisterSet)[RC_v4i1] = VectorRegisters;
    (*TypeToRegisterSet)[RC_v8i1] = VectorRegisters;
    (*TypeToRegisterSet)[RC_v16i1] = VectorRegisters;
    (*TypeToRegisterSet)[RC_v16i8] = VectorRegisters;
    (*TypeToRegisterSet)[RC_v8i16] = VectorRegisters;
    (*TypeToRegisterSet)[RC_v4i32] = VectorRegisters;
    (*TypeToRegisterSet)[RC_v4f32] = VectorRegisters;
    (*TypeToRegisterSet)[RCX86_Is64To8] = Trunc64To8Registers;
    (*TypeToRegisterSet)[RCX86_Is32To8] = Trunc32To8Registers;
    (*TypeToRegisterSet)[RCX86_Is16To8] = Trunc16To8Registers;
    (*TypeToRegisterSet)[RCX86_IsTrunc8Rcvr] = Trunc8RcvrRegisters;
    (*TypeToRegisterSet)[RCX86_IsAhRcvr] = AhRcvrRegisters;
  }

  static SmallBitVector getRegisterSet(const ::Ice::ClFlags &Flags,
                                       TargetLowering::RegSetMask Include,
                                       TargetLowering::RegSetMask Exclude) {
    SmallBitVector Registers(RegisterSet::Reg_NUM);

    const bool NeedSandboxing = Flags.getUseSandboxing();
#define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \
          sboxres, isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, \
          is16To8, isTrunc8Rcvr, isAhRcvr, aliases) \
  if (!NeedSandboxing || !(sboxres)) { \
    if (scratch && (Include & ::Ice::TargetLowering::RegSet_CallerSave)) \
      Registers[RegisterSet::val] = true; \
    if (preserved && (Include & ::Ice::TargetLowering::RegSet_CalleeSave)) \
      Registers[RegisterSet::val] = true; \
    if (stackptr && (Include & ::Ice::TargetLowering::RegSet_StackPointer)) \
      Registers[RegisterSet::val] = true; \
    if (frameptr && (Include & ::Ice::TargetLowering::RegSet_FramePointer)) \
      Registers[RegisterSet::val] = true; \
    if (scratch && (Exclude & ::Ice::TargetLowering::RegSet_CallerSave)) \
      Registers[RegisterSet::val] = false; \
    if (preserved && (Exclude & ::Ice::TargetLowering::RegSet_CalleeSave)) \
      Registers[RegisterSet::val] = false; \
    if (stackptr && (Exclude & ::Ice::TargetLowering::RegSet_StackPointer)) \
      Registers[RegisterSet::val] = false; \
    if (frameptr && (Exclude & ::Ice::TargetLowering::RegSet_FramePointer)) \
      Registers[RegisterSet::val] = false; \
  }

  REGX8664_TABLE

#undef X

    return Registers;
  }
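
  // Illustrative sketch (not part of the original interface): the Exclude
  // tests run after the Include tests, so Exclude wins on conflicts. For
  // example,
  //
  //   getRegisterSet(Flags, ::Ice::TargetLowering::RegSet_CallerSave,
  //                  ::Ice::TargetLowering::RegSet_StackPointer)
  //
  // sets the bit for every caller-save (scratch) register and then clears any
  // register marked as the stack pointer; registers reserved for sandboxing
  // are skipped entirely when sandboxing is enabled.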

  static void makeRandomRegisterPermutation(
      Cfg *Func, llvm::SmallVectorImpl<RegNumT> &Permutation,
      const SmallBitVector &ExcludeRegisters, uint64_t Salt) {
    // TODO(stichnot): Declaring Permutation this way loses type/size
    // information. Fix this in conjunction with the caller-side TODO.
    assert(Permutation.size() >= RegisterSet::Reg_NUM);

    // Expected upper bound on the number of registers in a single equivalence
    // class. For x86-64 the largest class is the 16 XMM registers; exceeding
    // this inline capacity only costs a heap allocation, so it is a
    // performance knob, not a correctness requirement.
    static const unsigned MaxEquivalenceClassSize = 8;
    using RegisterList = llvm::SmallVector<RegNumT, MaxEquivalenceClassSize>;
    using EquivalenceClassMap = std::map<uint32_t, RegisterList>;
    EquivalenceClassMap EquivalenceClasses;
    SizeT NumShuffled = 0, NumPreserved = 0;

    // Build up the equivalence classes of registers by looking at the
    // register properties as well as whether the registers should be
    // explicitly excluded from shuffling.
#define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \
          sboxres, isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, \
          is16To8, isTrunc8Rcvr, isAhRcvr, aliases) \
  if (ExcludeRegisters[RegisterSet::val]) { \
    /* val stays the same in the resulting permutation. */ \
    Permutation[RegisterSet::val] = RegisterSet::val; \
    ++NumPreserved; \
  } else { \
    uint32_t AttrKey = 0; \
    uint32_t Index = 0; \
    /* Combine relevant attributes into an equivalence class key. */ \
    Index |= (scratch << (AttrKey++)); \
    Index |= (preserved << (AttrKey++)); \
    Index |= (is8 << (AttrKey++)); \
    Index |= (is16 << (AttrKey++)); \
    Index |= (is32 << (AttrKey++)); \
    Index |= (is64 << (AttrKey++)); \
    Index |= (isXmm << (AttrKey++)); \
    Index |= (is16To8 << (AttrKey++)); \
    Index |= (is32To8 << (AttrKey++)); \
    Index |= (is64To8 << (AttrKey++)); \
    Index |= (isTrunc8Rcvr << (AttrKey++)); \
    /* val is assigned to an equivalence class based on its properties. */ \
    EquivalenceClasses[Index].push_back(RegisterSet::val); \
  }
    REGX8664_TABLE
#undef X

    // Create a random number generator for regalloc randomization.
    RandomNumberGenerator RNG(getFlags().getRandomSeed(),
                              RPE_RegAllocRandomization, Salt);
    RandomNumberGeneratorWrapper RNGW(RNG);

    // Shuffle the resulting equivalence classes.
    for (auto I : EquivalenceClasses) {
      const RegisterList &List = I.second;
      RegisterList Shuffled(List);
      RandomShuffle(Shuffled.begin(), Shuffled.end(), RNGW);
      for (size_t SI = 0, SE = Shuffled.size(); SI < SE; ++SI) {
        Permutation[List[SI]] = Shuffled[SI];
        ++NumShuffled;
      }
    }

    assert(NumShuffled + NumPreserved == RegisterSet::Reg_NUM);

    if (Func->isVerbose(IceV_Random)) {
      OstreamLocker L(Func->getContext());
      Ostream &Str = Func->getContext()->getStrDump();
      Str << "Register equivalence classes:\n";
      for (auto I : EquivalenceClasses) {
        Str << "{";
        const RegisterList &List = I.second;
        bool First = true;
        for (RegNumT Register : List) {
          if (!First)
            Str << " ";
          First = false;
          Str << getRegName(Register);
        }
        Str << "}\n";
      }
    }
  }

  static RegNumT getRaxOrDie() { return RegisterSet::Reg_rax; }

  static RegNumT getRdxOrDie() { return RegisterSet::Reg_rdx; }

#if defined(SUBZERO_USE_MICROSOFT_ABI)
  // Microsoft x86-64 calling convention:
  //
  // * The first four arguments of vector/fp type, regardless of their
  // position relative to the other arguments in the argument list, are placed
  // in registers %xmm0 - %xmm3.
  //
  // * The first four arguments of integer types, regardless of their position
  // relative to the other arguments in the argument list, are placed in
  // registers %rcx, %rdx, %r8, and %r9.

  /// The maximum number of arguments to pass in XMM registers
  static constexpr uint32_t X86_MAX_XMM_ARGS = 4;
  /// The maximum number of arguments to pass in GPR registers
  static constexpr uint32_t X86_MAX_GPR_ARGS = 4;
  static RegNumT getRegisterForGprArgNum(Type Ty, uint32_t ArgNum) {
    if (ArgNum >= X86_MAX_GPR_ARGS) {
      return RegNumT();
    }
    static const RegisterSet::AllRegisters GprForArgNum[] = {
        RegisterSet::Reg_rcx, RegisterSet::Reg_rdx, RegisterSet::Reg_r8,
        RegisterSet::Reg_r9,
    };
    static_assert(llvm::array_lengthof(GprForArgNum) == X86_MAX_GPR_ARGS,
                  "Mismatch between MAX_GPR_ARGS and GprForArgNum.");
    assert(Ty == IceType_i64 || Ty == IceType_i32);
    return getGprForType(Ty, GprForArgNum[ArgNum]);
  }
#else
  // System V x86-64 calling convention:
  //
  // * The first eight arguments of vector/fp type, regardless of their
  // position relative to the other arguments in the argument list, are placed
  // in registers %xmm0 - %xmm7.
  //
  // * The first six arguments of integer types, regardless of their position
  // relative to the other arguments in the argument list, are placed in
  // registers %rdi, %rsi, %rdx, %rcx, %r8, and %r9.
  //
  // This intends to match the section "Function Calling Sequence" of the
  // document "System V Application Binary Interface."

  /// The maximum number of arguments to pass in XMM registers
  static constexpr uint32_t X86_MAX_XMM_ARGS = 8;
  /// The maximum number of arguments to pass in GPR registers
  static constexpr uint32_t X86_MAX_GPR_ARGS = 6;
  /// Get the register for a given argument slot in the GPRs.
  static RegNumT getRegisterForGprArgNum(Type Ty, uint32_t ArgNum) {
    if (ArgNum >= X86_MAX_GPR_ARGS) {
      return RegNumT();
    }
    static const RegisterSet::AllRegisters GprForArgNum[] = {
        RegisterSet::Reg_rdi, RegisterSet::Reg_rsi, RegisterSet::Reg_rdx,
        RegisterSet::Reg_rcx, RegisterSet::Reg_r8,  RegisterSet::Reg_r9,
    };
    static_assert(llvm::array_lengthof(GprForArgNum) == X86_MAX_GPR_ARGS,
                  "Mismatch between MAX_GPR_ARGS and GprForArgNum.");
    assert(Ty == IceType_i64 || Ty == IceType_i32);
    return getGprForType(Ty, GprForArgNum[ArgNum]);
  }
#endif
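
  // Illustrative sketch (not part of the original interface): with the System
  // V mapping above, getRegisterForGprArgNum(IceType_i64, 0) yields
  // RegisterSet::Reg_rdi and getRegisterForGprArgNum(IceType_i32, 2) yields
  // the 32-bit view of rdx; under SUBZERO_USE_MICROSOFT_ABI the same slots map
  // to rcx and the 32-bit view of r8 instead. Slots at or beyond
  // X86_MAX_GPR_ARGS return a default-constructed (invalid) RegNumT.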

  /// Whether scalar floating point arguments are passed in XMM registers
  static constexpr bool X86_PASS_SCALAR_FP_IN_XMM = true;
  /// Get the register for a given argument slot in the XMM registers.
  static RegNumT getRegisterForXmmArgNum(uint32_t ArgNum) {
    // TODO(sehr): Change to use the CCArg technique used in ARM32.
    static_assert(RegisterSet::Reg_xmm0 + 1 == RegisterSet::Reg_xmm1,
                  "Inconsistency between XMM register numbers and ordinals");
    if (ArgNum >= X86_MAX_XMM_ARGS) {
      return RegNumT();
    }
    return RegNumT::fixme(RegisterSet::Reg_xmm0 + ArgNum);
  }

  /// The number of bits in a byte
  static constexpr uint32_t X86_CHAR_BIT = 8;
  /// Stack alignment. This is defined in IceTargetLoweringX8664.cpp because it
  /// is used as an argument to std::max(), and the default std::less<T> has an
  /// operator()(T const &, T const &) which requires this member to have an
  /// address.
  static const uint32_t X86_STACK_ALIGNMENT_BYTES;
  /// Size of the return address on the stack
  static constexpr uint32_t X86_RET_IP_SIZE_BYTES = 8;
  /// The number of different NOP instructions
  static constexpr uint32_t X86_NUM_NOP_VARIANTS = 5;

  /// \name Limits for unrolling memory intrinsics.
  /// @{
  static constexpr uint32_t MEMCPY_UNROLL_LIMIT = 8;
  static constexpr uint32_t MEMMOVE_UNROLL_LIMIT = 8;
  static constexpr uint32_t MEMSET_UNROLL_LIMIT = 8;
  /// @}

  /// Value is in bytes. Return Value adjusted to the next highest multiple of
  /// the stack alignment.
  static uint32_t applyStackAlignment(uint32_t Value) {
    return Utils::applyAlignment(Value, X86_STACK_ALIGNMENT_BYTES);
  }
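
  // Illustrative sketch (not part of the original interface), assuming the
  // usual 16-byte x86-64 stack alignment defined in IceTargetLoweringX8664.cpp:
  // applyStackAlignment(1) == 16, applyStackAlignment(16) == 16, and
  // applyStackAlignment(17) == 32; i.e., Value is rounded up to the next
  // multiple of X86_STACK_ALIGNMENT_BYTES.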

  /// Return the type which the elements of the vector have in the X86
  /// representation of the vector.
  static Type getInVectorElementType(Type Ty) {
    assert(isVectorType(Ty));
    assert(Ty < TableTypeX8664AttributesSize);
    return TableTypeX8664Attributes[Ty].InVectorElementType;
  }

  // Note: The following data structures are defined in
  // IceTargetLoweringX8664.cpp.

  /// The following table summarizes the logic for lowering the fcmp
  /// instruction. There is one table entry for each of the 16 conditions.
  ///
  /// The first four columns describe the case when the operands are floating
  /// point scalar values. A comment in lowerFcmp() describes the lowering
  /// template. In the most general case, there is a compare followed by two
  /// conditional branches, because some fcmp conditions don't map to a single
  /// x86 conditional branch. However, in many cases it is possible to swap the
  /// operands in the comparison and have a single conditional branch. Since
  /// it's quite tedious to validate the table by hand, good execution tests
  /// are helpful.
  ///
  /// The last two columns describe the case when the operands are vectors of
  /// floating point values. For most fcmp conditions, there is a clear mapping
  /// to a single x86 cmpps instruction variant. Some fcmp conditions require
  /// special code to handle, and these are marked in the table with a
  /// Cmpps_Invalid predicate.
  /// @{
  static const struct TableFcmpType {
    uint32_t Default;
    bool SwapScalarOperands;
    Cond::BrCond C1, C2;
    bool SwapVectorOperands;
    Cond::CmppsCond Predicate;
  } TableFcmp[];
  static const size_t TableFcmpSize;
  /// @}

  /// The following table summarizes the logic for lowering the icmp
  /// instruction for i32 and narrower types. Each icmp condition has a clear
  /// mapping to an x86 conditional branch instruction.
  /// @{
  static const struct TableIcmp32Type { Cond::BrCond Mapping; } TableIcmp32[];
  static const size_t TableIcmp32Size;
  /// @}

  /// The following table summarizes the logic for lowering the icmp
  /// instruction for the i64 type. For Eq and Ne, two separate 32-bit
  /// comparisons and conditional branches are needed. For the other
  /// conditions, three separate conditional branches are needed.
  /// @{
  static const struct TableIcmp64Type {
    Cond::BrCond C1, C2, C3;
  } TableIcmp64[];
  static const size_t TableIcmp64Size;
  /// @}

  static Cond::BrCond getIcmp32Mapping(InstIcmp::ICond Cond) {
    assert(Cond < TableIcmp32Size);
    return TableIcmp32[Cond].Mapping;
  }
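
  // Illustrative sketch (not part of the original interface), assuming the
  // usual Br_* condition names from IceConditionCodesX8664.h: the table maps
  // each icmp predicate to the branch that implements it directly, e.g.
  // InstIcmp::Eq -> Br_e, InstIcmp::Ult -> Br_b (unsigned below), and
  // InstIcmp::Slt -> Br_l (signed less).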

  static const struct TableTypeX8664AttributesType {
    Type InVectorElementType;
  } TableTypeX8664Attributes[];
  static const size_t TableTypeX8664AttributesSize;

  //----------------------------------------------------------------------------
  //     __  __   __  ______  ______
  //    /\ \/\ "-.\ \/\  ___\/\__  _\
  //    \ \ \ \ \-.  \ \___  \/_/\ \/
  //     \ \_\ \_\\"\_\/\_____\ \ \_\
  //      \/_/\/_/ \/_/\/_____/  \/_/
  //
  //----------------------------------------------------------------------------
  using Traits = TargetX8664Traits;
  using Insts = ::Ice::X8664::Insts<Traits>;

  using TargetLowering = ::Ice::X8664::TargetX86Base<Traits>;
  using ConcreteTarget = ::Ice::X8664::TargetX8664;
  using Assembler = ::Ice::X8664::AssemblerX86Base<Traits>;

  /// X86Operand extends the Operand hierarchy. Its subclasses are
  /// X86OperandMem and VariableSplit.
  class X86Operand : public ::Ice::Operand {
    X86Operand() = delete;
    X86Operand(const X86Operand &) = delete;
    X86Operand &operator=(const X86Operand &) = delete;

  public:
    enum OperandKindX8664 { k__Start = ::Ice::Operand::kTarget, kMem, kSplit };
    using ::Ice::Operand::dump;

    void dump(const Cfg *, Ostream &Str) const override;

  protected:
    X86Operand(OperandKindX8664 Kind, Type Ty)
        : Operand(static_cast<::Ice::Operand::OperandKind>(Kind), Ty) {}
  };

  /// X86OperandMem represents the m64 addressing mode, with optional base and
  /// index registers, a constant offset, and a fixed shift value for the index
  /// register.
  class X86OperandMem : public X86Operand {
    X86OperandMem() = delete;
    X86OperandMem(const X86OperandMem &) = delete;
    X86OperandMem &operator=(const X86OperandMem &) = delete;

  public:
    enum SegmentRegisters { DefaultSegment = -1, SegReg_NUM };
    static X86OperandMem *
    create(Cfg *Func, Type Ty, Variable *Base, Constant *Offset,
           Variable *Index = nullptr, uint16_t Shift = 0,
           SegmentRegisters SegmentRegister = DefaultSegment,
           bool IsRebased = false) {
      assert(SegmentRegister == DefaultSegment);
      (void)SegmentRegister;
      return new (Func->allocate<X86OperandMem>())
          X86OperandMem(Func, Ty, Base, Offset, Index, Shift, IsRebased);
    }
    static X86OperandMem *create(Cfg *Func, Type Ty, Variable *Base,
                                 Constant *Offset, bool IsRebased) {
      constexpr Variable *NoIndex = nullptr;
      constexpr uint16_t NoShift = 0;
      return new (Func->allocate<X86OperandMem>())
          X86OperandMem(Func, Ty, Base, Offset, NoIndex, NoShift, IsRebased);
    }
    Variable *getBase() const { return Base; }
    Constant *getOffset() const { return Offset; }
    Variable *getIndex() const { return Index; }
    uint16_t getShift() const { return Shift; }
    SegmentRegisters getSegmentRegister() const { return DefaultSegment; }
    void emitSegmentOverride(Assembler *) const {}
    bool getIsRebased() const { return IsRebased; }
    Address toAsmAddress(Assembler *Asm, const Ice::TargetLowering *Target,
                         bool IsLeaAddr = false) const;

    void emit(const Cfg *Func) const override;
    using X86Operand::dump;
    void dump(const Cfg *Func, Ostream &Str) const override;

    static bool classof(const Operand *Operand) {
      return Operand->getKind() == static_cast<OperandKind>(kMem);
    }

    void setRandomized(bool R) { Randomized = R; }

    bool getRandomized() const { return Randomized; }

  private:
    X86OperandMem(Cfg *Func, Type Ty, Variable *Base, Constant *Offset,
                  Variable *Index, uint16_t Shift, bool IsRebased);

    Variable *const Base;
    Constant *const Offset;
    Variable *const Index;
    const uint16_t Shift;
    const bool IsRebased;
    /// A flag to show if this memory operand is a randomized one. Randomized
    /// memory operands are generated in
    /// TargetX86Base::randomizeOrPoolImmediate().
    bool Randomized = false;
  };
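
  // Illustrative sketch (not part of the original interface): lowering code
  // would build a memory operand along the lines of
  //
  //   auto *Mem = X86OperandMem::create(Func, IceType_i32, BaseVar, Offset,
  //                                     IndexVar, /*Shift=*/2);
  //
  // to describe [BaseVar + (IndexVar << 2) + Offset], where BaseVar, IndexVar
  // and Offset are placeholder operands. Segment overrides are not supported
  // here; only DefaultSegment is accepted.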

  /// VariableSplit is a way to treat an f64 memory location as a pair of i32
  /// locations (Low and High). This is needed for some cases of the Bitcast
  /// instruction. Since it's not possible for integer registers to access the
  /// XMM registers and vice versa, the lowering forces the f64 to be spilled
  /// to the stack and then accesses it through the VariableSplit.
  // TODO(jpp): remove references to VariableSplit from IceInstX86Base as
  // 64-bit targets can natively handle these.
  class VariableSplit : public X86Operand {
    VariableSplit() = delete;
    VariableSplit(const VariableSplit &) = delete;
    VariableSplit &operator=(const VariableSplit &) = delete;

  public:
    enum Portion { Low, High };
    static VariableSplit *create(Cfg *Func, Variable *Var, Portion Part) {
      return new (Func->allocate<VariableSplit>())
          VariableSplit(Func, Var, Part);
    }
    int32_t getOffset() const { return Part == High ? 4 : 0; }

    Address toAsmAddress(const Cfg *Func) const;
    void emit(const Cfg *Func) const override;
    using X86Operand::dump;
    void dump(const Cfg *Func, Ostream &Str) const override;

    static bool classof(const Operand *Operand) {
      return Operand->getKind() == static_cast<OperandKind>(kSplit);
    }

  private:
    VariableSplit(Cfg *Func, Variable *Var, Portion Part)
        : X86Operand(kSplit, IceType_i32), Var(Var), Part(Part) {
      assert(Var->getType() == IceType_f64);
      Vars = Func->allocateArrayOf<Variable *>(1);
      Vars[0] = Var;
      NumVars = 1;
    }

    Variable *Var;
    Portion Part;
  };

  // Note: The following data structures are defined in IceInstX8664.cpp.

  static const struct InstBrAttributesType {
    Cond::BrCond Opposite;
    const char *DisplayString;
    const char *EmitString;
  } InstBrAttributes[];

  static const struct InstCmppsAttributesType {
    const char *EmitString;
  } InstCmppsAttributes[];

  static const struct TypeAttributesType {
    const char *CvtString;      // i (integer), s (single FP), d (double FP)
    const char *SdSsString;     // ss, sd, or <blank>
    const char *PdPsString;     // ps, pd, or <blank>
    const char *SpSdString;     // ss, sd, ps, pd, or <blank>
    const char *IntegralString; // b, w, d, or <blank>
    const char *UnpackString;   // bw, wd, dq, or <blank>
    const char *PackString;     // wb, dw, or <blank>
    const char *WidthString;    // b, w, l, q, or <blank>
    const char *FldString;      // s, l, or <blank>
  } TypeAttributes[];
};

using Traits = ::Ice::X8664::TargetX8664Traits;
} // end of namespace X8664

} // end of namespace Ice

#endif // SUBZERO_SRC_ICETARGETLOWERINGX8664TRAITS_H