Index: include/llvm/Intrinsics.td
===================================================================
--- include/llvm/Intrinsics.td (revision 3710)
+++ include/llvm/Intrinsics.td (working copy)
@@ -439,10 +439,10 @@
 // Target-specific intrinsics
 //===----------------------------------------------------------------------===//

-include "llvm/IntrinsicsPowerPC.td"
+//include "llvm/IntrinsicsPowerPC.td"
 include "llvm/IntrinsicsX86.td"
-include "llvm/IntrinsicsARM.td"
-include "llvm/IntrinsicsCellSPU.td"
-include "llvm/IntrinsicsAlpha.td"
-include "llvm/IntrinsicsXCore.td"
-include "llvm/IntrinsicsPTX.td"
+//include "llvm/IntrinsicsARM.td"
+//include "llvm/IntrinsicsCellSPU.td"
+//include "llvm/IntrinsicsAlpha.td"
+//include "llvm/IntrinsicsXCore.td"
+//include "llvm/IntrinsicsPTX.td"
Index: lib/Analysis/BasicAliasAnalysis.cpp
===================================================================
--- lib/Analysis/BasicAliasAnalysis.cpp (revision 3710)
+++ lib/Analysis/BasicAliasAnalysis.cpp (working copy)
@@ -785,27 +785,27 @@
         return NoModRef;
       break;
     }
-    case Intrinsic::arm_neon_vld1: {
-      // LLVM's vld1 and vst1 intrinsics currently only support a single
-      // vector register.
-      uint64_t Size =
-        TD ? TD->getTypeStoreSize(II->getType()) : UnknownSize;
-      if (isNoAlias(Location(II->getArgOperand(0), Size,
-                             II->getMetadata(LLVMContext::MD_tbaa)),
-                    Loc))
-        return NoModRef;
-      break;
+    //case Intrinsic::arm_neon_vld1: {
+    //  // LLVM's vld1 and vst1 intrinsics currently only support a single
+    //  // vector register.
+    //  uint64_t Size =
+    //    TD ? TD->getTypeStoreSize(II->getType()) : UnknownSize;
+    //  if (isNoAlias(Location(II->getArgOperand(0), Size,
+    //                         II->getMetadata(LLVMContext::MD_tbaa)),
+    //                Loc))
+    //    return NoModRef;
+    //  break;
+    //}
+    //case Intrinsic::arm_neon_vst1: {
+    //  uint64_t Size =
+    //    TD ? TD->getTypeStoreSize(II->getArgOperand(1)->getType()) : UnknownSize;
+    //  if (isNoAlias(Location(II->getArgOperand(0), Size,
+    //                         II->getMetadata(LLVMContext::MD_tbaa)),
+    //                Loc))
+    //    return NoModRef;
+    //  break;
+    //}
     }
-    case Intrinsic::arm_neon_vst1: {
-      uint64_t Size =
-        TD ? TD->getTypeStoreSize(II->getArgOperand(1)->getType()) : UnknownSize;
-      if (isNoAlias(Location(II->getArgOperand(0), Size,
-                             II->getMetadata(LLVMContext::MD_tbaa)),
-                    Loc))
-        return NoModRef;
-      break;
-    }
-    }

   // We can bound the aliasing properties of memset_pattern16 just as we can
   // for memcpy/memset. This is particularly important because the
Index: lib/Transforms/InstCombine/InstCombineCalls.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCalls.cpp (revision 3710)
+++ lib/Transforms/InstCombine/InstCombineCalls.cpp (working copy)
@@ -544,25 +544,25 @@
       }
     }
     break;
-  case Intrinsic::ppc_altivec_lvx:
-  case Intrinsic::ppc_altivec_lvxl:
-    // Turn PPC lvx -> load if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
-      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
-                                          PointerType::getUnqual(II->getType()));
-      return new LoadInst(Ptr);
-    }
-    break;
-  case Intrinsic::ppc_altivec_stvx:
-  case Intrinsic::ppc_altivec_stvxl:
-    // Turn stvx -> store if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
-      Type *OpPtrTy =
-        PointerType::getUnqual(II->getArgOperand(0)->getType());
-      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
-      return new StoreInst(II->getArgOperand(0), Ptr);
-    }
-    break;
+  //case Intrinsic::ppc_altivec_lvx:
+  //case Intrinsic::ppc_altivec_lvxl:
+  //  // Turn PPC lvx -> load if the pointer is known aligned.
+  //  if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
+  //    Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
+  //                                        PointerType::getUnqual(II->getType()));
+  //    return new LoadInst(Ptr);
+  //  }
+  //  break;
+  //case Intrinsic::ppc_altivec_stvx:
+  //case Intrinsic::ppc_altivec_stvxl:
+  //  // Turn stvx -> store if the pointer is known aligned.
+  //  if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
+  //    Type *OpPtrTy =
+  //      PointerType::getUnqual(II->getArgOperand(0)->getType());
+  //    Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
+  //    return new StoreInst(II->getArgOperand(0), Ptr);
+  //  }
+  //  break;
   case Intrinsic::x86_sse_storeu_ps:
   case Intrinsic::x86_sse2_storeu_pd:
   case Intrinsic::x86_sse2_storeu_dq:
@@ -619,79 +619,79 @@
     break;
   }

-  case Intrinsic::ppc_altivec_vperm:
-    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
-    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
-      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");
-
-      // Check that all of the elements are integer constants or undefs.
-      bool AllEltsOk = true;
-      for (unsigned i = 0; i != 16; ++i) {
-        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
-            !isa<UndefValue>(Mask->getOperand(i))) {
-          AllEltsOk = false;
-          break;
-        }
-      }
-
-      if (AllEltsOk) {
-        // Cast the input vectors to byte vectors.
-        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
-                                            Mask->getType());
-        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
-                                            Mask->getType());
-        Value *Result = UndefValue::get(Op0->getType());
-
-        // Only extract each element once.
-        Value *ExtractedElts[32];
-        memset(ExtractedElts, 0, sizeof(ExtractedElts));
-
-        for (unsigned i = 0; i != 16; ++i) {
-          if (isa<UndefValue>(Mask->getOperand(i)))
-            continue;
-          unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
-          Idx &= 31; // Match the hardware behavior.
-
-          if (ExtractedElts[Idx] == 0) {
-            ExtractedElts[Idx] =
-              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
-                                            Builder->getInt32(Idx&15));
-          }
-
-          // Insert this value into the result vector.
-          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
-                                                Builder->getInt32(i));
-        }
-        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
-      }
-    }
-    break;
+  //case Intrinsic::ppc_altivec_vperm:
+  //  // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
+  //  if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
+  //    assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");
+  //
+  //    // Check that all of the elements are integer constants or undefs.
+  //    bool AllEltsOk = true;
+  //    for (unsigned i = 0; i != 16; ++i) {
+  //      if (!isa<ConstantInt>(Mask->getOperand(i)) &&
+  //          !isa<UndefValue>(Mask->getOperand(i))) {
+  //        AllEltsOk = false;
+  //        break;
+  //      }
+  //    }
+  //
+  //    if (AllEltsOk) {
+  //      // Cast the input vectors to byte vectors.
+  //      Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
+  //                                          Mask->getType());
+  //      Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
+  //                                          Mask->getType());
+  //      Value *Result = UndefValue::get(Op0->getType());
+  //
+  //      // Only extract each element once.
+  //      Value *ExtractedElts[32];
+  //      memset(ExtractedElts, 0, sizeof(ExtractedElts));
+  //
+  //      for (unsigned i = 0; i != 16; ++i) {
+  //        if (isa<UndefValue>(Mask->getOperand(i)))
+  //          continue;
+  //        unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
+  //        Idx &= 31; // Match the hardware behavior.
+  //
+  //        if (ExtractedElts[Idx] == 0) {
+  //          ExtractedElts[Idx] =
+  //            Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
+  //                                          Builder->getInt32(Idx&15));
+  //        }
+  //
+  //        // Insert this value into the result vector.
+  //        Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
+  //                                              Builder->getInt32(i));
+  //      }
+  //      return CastInst::Create(Instruction::BitCast, Result, CI.getType());
+  //    }
+  //  }
+  //  break;

-  case Intrinsic::arm_neon_vld1:
-  case Intrinsic::arm_neon_vld2:
-  case Intrinsic::arm_neon_vld3:
-  case Intrinsic::arm_neon_vld4:
-  case Intrinsic::arm_neon_vld2lane:
-  case Intrinsic::arm_neon_vld3lane:
-  case Intrinsic::arm_neon_vld4lane:
-  case Intrinsic::arm_neon_vst1:
-  case Intrinsic::arm_neon_vst2:
-  case Intrinsic::arm_neon_vst3:
-  case Intrinsic::arm_neon_vst4:
-  case Intrinsic::arm_neon_vst2lane:
-  case Intrinsic::arm_neon_vst3lane:
-  case Intrinsic::arm_neon_vst4lane: {
-    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), TD);
-    unsigned AlignArg = II->getNumArgOperands() - 1;
-    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
-    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
-      II->setArgOperand(AlignArg,
-                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
-                                         MemAlign, false));
-      return II;
-    }
-    break;
-  }
+  //case Intrinsic::arm_neon_vld1:
+  //case Intrinsic::arm_neon_vld2:
+  //case Intrinsic::arm_neon_vld3:
+  //case Intrinsic::arm_neon_vld4:
+  //case Intrinsic::arm_neon_vld2lane:
+  //case Intrinsic::arm_neon_vld3lane:
+  //case Intrinsic::arm_neon_vld4lane:
+  //case Intrinsic::arm_neon_vst1:
+  //case Intrinsic::arm_neon_vst2:
+  //case Intrinsic::arm_neon_vst3:
+  //case Intrinsic::arm_neon_vst4:
+  //case Intrinsic::arm_neon_vst2lane:
+  //case Intrinsic::arm_neon_vst3lane:
+  //case Intrinsic::arm_neon_vst4lane: {
+  //  unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), TD);
+  //  unsigned AlignArg = II->getNumArgOperands() - 1;
+  //  ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
+  //  if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
+  //    II->setArgOperand(AlignArg,
+  //                      ConstantInt::get(Type::getInt32Ty(II->getContext()),
+  //                                       MemAlign, false));
+  //    return II;
+  //  }
+  //  break;
+  //}

   case Intrinsic::stackrestore: {
     // If the save is right next to the restore, remove the restore. This can