//===-- ARMTargetMachine.cpp - Define TargetMachine for ARM ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMFrameLowering.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "ARMTargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;

static cl::opt<bool>
DisableA15SDOptimization("disable-a15-sd-optimization", cl::Hidden,
                   cl::desc("Inhibit optimization of S->D register accesses on A15"),
                   cl::init(false));

static cl::opt<bool>
EnableAtomicTidy("arm-atomic-cfg-tidy", cl::Hidden,
                 cl::desc("Run SimplifyCFG after expanding atomic operations"
                          " to make use of cmpxchg flow-based information"),
                 cl::init(true));

static cl::opt<bool>
EnableARMLoadStoreOpt("arm-load-store-opt", cl::Hidden,
                      cl::desc("Enable ARM load/store optimization pass"),
                      cl::init(true));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
EnableGlobalMerge("arm-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"));
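
// As cl::opts, the flags above are available on any tool that links in this
// backend; a hedged example invocation:
//   llc -mtriple=armv7-unknown-linux-gnueabihf -arm-load-store-opt=false foo.ll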

extern "C" void LLVMInitializeARMTarget() {
  // Register the target.
  RegisterTargetMachine<ARMLETargetMachine> X(TheARMLETarget);
  RegisterTargetMachine<ARMBETargetMachine> Y(TheARMBETarget);
  RegisterTargetMachine<ThumbLETargetMachine> A(TheThumbLETarget);
  RegisterTargetMachine<ThumbBETargetMachine> B(TheThumbBETarget);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return make_unique<TargetLoweringObjectFileMachO>();
  if (TT.isOSWindows())
    return make_unique<TargetLoweringObjectFileCOFF>();
  return make_unique<ARMElfTargetObjectFile>();
}

static ARMBaseTargetMachine::ARMABI
computeTargetABI(const Triple &TT, StringRef CPU,
                 const TargetOptions &Options) {
  if (Options.MCOptions.getABIName() == "aapcs16")
    return ARMBaseTargetMachine::ARM_ABI_AAPCS16;
  else if (Options.MCOptions.getABIName().startswith("aapcs"))
    return ARMBaseTargetMachine::ARM_ABI_AAPCS;
  else if (Options.MCOptions.getABIName().startswith("apcs"))
    return ARMBaseTargetMachine::ARM_ABI_APCS;

  assert(Options.MCOptions.getABIName().empty() &&
         "Unknown target-abi option!");

  ARMBaseTargetMachine::ARMABI TargetABI =
      ARMBaseTargetMachine::ARM_ABI_UNKNOWN;

  // FIXME: This is duplicated code from the front end and should be unified.
  if (TT.isOSBinFormatMachO()) {
    // The enclosing branch already guarantees isOSBinFormatMachO(), so only
    // the environment, OS, and CPU need checking here.
    if (TT.getEnvironment() == llvm::Triple::EABI ||
        TT.getOS() == llvm::Triple::UnknownOS ||
        CPU.startswith("cortex-m")) {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
    } else if (TT.isWatchOS()) {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS16;
    } else {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
    }
  } else if (TT.isOSWindows()) {
    // FIXME: this is invalid for WindowsCE
    TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
  } else {
    // Select the default based on the platform.
    switch (TT.getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::EABIHF:
    case llvm::Triple::EABI:
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
      break;
    case llvm::Triple::GNU:
      TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
      break;
    default:
      if (TT.isOSNetBSD())
        TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
      else
        TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
      break;
    }
  }

  return TargetABI;
}
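
// Hedged examples of the mapping above:
//   armv7-unknown-linux-gnueabihf -> ARM_ABI_AAPCS   (GNUEABIHF environment)
//   armv6-unknown-netbsd          -> ARM_ABI_APCS    (NetBSD default case)
//   thumbv7k-apple-watchos        -> ARM_ABI_AAPCS16 (watchOS Mach-O)
//
// computeDataLayout below assembles the LLVM data layout string for the
// given triple and ABI. As a hedged worked example, a little-endian AAPCS
// ELF target yields "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".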

static std::string computeDataLayout(const Triple &TT, StringRef CPU,
                                     const TargetOptions &Options,
                                     bool isLittle) {
  auto ABI = computeTargetABI(TT, CPU, Options);
  std::string Ret;

  if (isLittle)
    // Little endian.
    Ret += "e";
  else
    // Big endian.
    Ret += "E";

  Ret += DataLayout::getManglingComponent(TT);

  // Pointers are 32 bits and aligned to 32 bits.
  Ret += "-p:32:32";

  // ABIs other than APCS have 64-bit integers with natural alignment.
  if (ABI != ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-i64:64";

  // We have 64-bit floats. The APCS ABI requires them to be aligned to 32
  // bits, the other ABIs to 64 bits. We always try to align to 64 bits.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-f64:32:64";

  // We have 128-bit and 64-bit vectors. The APCS ABI aligns them to 32 bits,
  // the other ABIs to 64. We always try to give them natural alignment.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-v64:32:64-v128:32:128";
  else if (ABI != ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-v128:64:128";

  // Try to align aggregates to 32 bits (the default is 64 bits, which has no
  // particular hardware support on 32-bit ARM).
  Ret += "-a:0:32";

  // Integer registers are 32 bits.
  Ret += "-n32";

  // The stack is 128-bit aligned on NaCl and AAPCS16, 64-bit aligned on
  // AAPCS, and 32-bit aligned everywhere else.
  if (TT.isOSNaCl() || ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-S128";
  else if (ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS)
    Ret += "-S64";
  else
    Ret += "-S32";

  return Ret;
}

/// TargetMachine ctor - Create an ARM architecture model.
///
ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM, CodeModel::Model CM,
                                           CodeGenOpt::Level OL, bool isLittle)
    : LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT,
                        CPU, FS, Options, RM, CM, OL),
      TargetABI(computeTargetABI(TT, CPU, Options)),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, CPU, FS, *this, isLittle), isLittle(isLittle) {

  // Default to triple-appropriate float ABI
  if (Options.FloatABIType == FloatABI::Default)
    this->Options.FloatABIType =
        Subtarget.isTargetHardFloat() ? FloatABI::Hard : FloatABI::Soft;

  // Default to triple-appropriate EABI
  if (Options.EABIVersion == EABI::Default ||
      Options.EABIVersion == EABI::Unknown) {
    if (Subtarget.isTargetGNUAEABI())
      this->Options.EABIVersion = EABI::GNU;
    else
      this->Options.EABIVersion = EABI::EABI5;
  }
}

ARMBaseTargetMachine::~ARMBaseTargetMachine() {}

const ARMSubtarget *
ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

  // FIXME: This is related to the code below that resets the target options:
  // we need to know whether the soft-float flag is set on the function before
  // we can generate a subtarget, and we also need to use it as a key for the
  // subtarget, since that can be the only difference between two functions.
  bool SoftFloat =
      F.hasFnAttribute("use-soft-float") &&
      F.getFnAttribute("use-soft-float").getValueAsString() == "true";
  // If the soft-float attribute is set on the function, turn on the
  // soft-float subtarget feature.
  if (SoftFloat)
    FS += FS.empty() ? "+soft-float" : ",+soft-float";
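
  // Hedged illustration: with "target-features"="+neon" and
  // "use-soft-float"="true", the cache key below becomes
  // "<cpu>+neon,+soft-float".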
  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<ARMSubtarget>(TargetTriple, CPU, FS, *this, isLittle);
  }
  return I.get();
}

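// Return a TargetIRAnalysis whose callback builds a fresh ARMTTIImpl per
// function, so TTI queries reflect the per-function subtarget computed by
// getSubtargetImpl above.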
TargetIRAnalysis ARMBaseTargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(ARMTTIImpl(this, F));
  });
}

void ARMTargetMachine::anchor() {}

ARMTargetMachine::ARMTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   const TargetOptions &Options,
                                   Reloc::Model RM, CodeModel::Model CM,
                                   CodeGenOpt::Level OL, bool isLittle)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
  initAsmInfo();
  if (!Subtarget.hasARMOps())
    report_fatal_error("CPU: '" + Subtarget.getCPUString() + "' does not "
                       "support ARM mode execution!");
}
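
// Hedged example: a Thumb-only CPU such as cortex-m3 combined with an arm
// (rather than thumb) triple has no ARM-mode support, so it would hit the
// report_fatal_error above.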

void ARMLETargetMachine::anchor() {}

ARMLETargetMachine::ARMLETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Reloc::Model RM, CodeModel::Model CM,
                                       CodeGenOpt::Level OL)
    : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void ARMBETargetMachine::anchor() {}

ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Reloc::Model RM, CodeModel::Model CM,
                                       CodeGenOpt::Level OL)
    : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

void ThumbTargetMachine::anchor() {}

ThumbTargetMachine::ThumbTargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Reloc::Model RM, CodeModel::Model CM,
                                       CodeGenOpt::Level OL, bool isLittle)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
  initAsmInfo();
}

void ThumbLETargetMachine::anchor() {}

ThumbLETargetMachine::ThumbLETargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM, CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void ThumbBETargetMachine::anchor() {}

ThumbBETargetMachine::ThumbBETargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM, CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

namespace {
/// ARM Code Generator Pass Configuration Options.
class ARMPassConfig : public TargetPassConfig {
public:
  ARMPassConfig(ARMBaseTargetMachine *TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {}

  ARMBaseTargetMachine &getARMTargetMachine() const {
    return getTM<ARMBaseTargetMachine>();
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
} // namespace
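
// The overridden hooks above run in roughly this order during code
// generation: addIRPasses -> addPreISel -> addInstSelector -> addPreRegAlloc
// -> addPreSched2 -> addPreEmitPass.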

TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new ARMPassConfig(this, PM);
}

void ARMPassConfig::addIRPasses() {
  if (TM->Options.ThreadModel == ThreadModel::Single)
    addPass(createLowerAtomicPass());
  else
    addPass(createAtomicExpandPass(TM));

  // Cmpxchg instructions are often followed by a comparison to determine
  // whether the operation succeeded. We can exploit existing control flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(-1, [this](const Function &F) {
      const auto &ST = this->TM->getSubtarget<ARMSubtarget>(F);
      return ST.hasAnyDataBarrier() && !ST.isThumb1Only();
    }));

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass(TM));
}

bool ARMPassConfig::addPreISel() {
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    // FIXME: This is using the Thumb1-only constant value for the maximal
    // global offset when merging globals. We may want to look into using the
    // old value of 4095 for non-Thumb1 code, based on the TargetMachine, but
    // this starts to become tricky when doing code gen per function.
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    // Merging of extern globals is enabled by default on non-Mach-O, as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled, as we emit the .subsections_via_symbols directive, which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();
    addPass(createGlobalMergePass(TM, 127, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}

bool ARMPassConfig::addInstSelector() {
  addPass(createARMISelDag(getARMTargetMachine(), getOptLevel()));
  return false;
}

void ARMPassConfig::addPreRegAlloc() {
  if (getOptLevel() != CodeGenOpt::None) {
    addPass(createMLxExpansionPass());

    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass(/* pre-register alloc */ true));

    if (!DisableA15SDOptimization)
      addPass(createA15SDOptimizerPass());
  }
}
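
// Hedged note: the pre-register-allocation load/store pass above mostly
// reschedules loads and stores so they become adjacent; the post-RA run added
// in addPreSched2 below does the actual LDM/STM merging.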

void ARMPassConfig::addPreSched2() {
  if (getOptLevel() != CodeGenOpt::None) {
    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass());

    addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass));
  }

  // Expand some pseudo instructions into multiple instructions to allow
  // proper scheduling.
  addPass(createARMExpandPseudoPass());

  if (getOptLevel() != CodeGenOpt::None) {
    // On v8, if-conversion depends on Thumb instruction widths.
    addPass(createThumb2SizeReductionPass([this](const Function &F) {
      return this->TM->getSubtarget<ARMSubtarget>(F).restrictIT();
    }));

    addPass(createIfConverter([this](const Function &F) {
      return !this->TM->getSubtarget<ARMSubtarget>(F).isThumb1Only();
    }));
  }
  addPass(createThumb2ITBlockPass());
}

void ARMPassConfig::addPreEmitPass() {
  addPass(createThumb2SizeReductionPass());

  // The constant island pass works on unbundled instructions.
  addPass(createUnpackMachineBundles([this](const Function &F) {
    return this->TM->getSubtarget<ARMSubtarget>(F).isThumb2();
  }));

  // Don't optimize barriers at -O0.
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createARMOptimizeBarriersPass());

  addPass(createARMConstantIslandPass());
}
    432