//== Store.cpp - Interface for maps from Locations to Values ----*- C++ -*--==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file defines the types Store and StoreManager.
//
//===----------------------------------------------------------------------===//

#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/GRState.h"
#include "clang/AST/CharUnits.h"

using namespace clang;
using namespace ento;

StoreManager::StoreManager(GRStateManager &stateMgr)
  : svalBuilder(stateMgr.getSValBuilder()), StateMgr(stateMgr),
    MRMgr(svalBuilder.getRegionManager()), Ctx(stateMgr.getContext()) {}

StoreRef StoreManager::enterStackFrame(const GRState *state,
                                       const StackFrameContext *frame) {
  return StoreRef(state->getStore(), *this);
}

const MemRegion *StoreManager::MakeElementRegion(const MemRegion *Base,
                                                 QualType EleTy,
                                                 uint64_t index) {
  NonLoc idx = svalBuilder.makeArrayIndex(index);
  return MRMgr.getElementRegion(EleTy, idx, Base, svalBuilder.getContext());
}

// FIXME: Merge with the implementation of the same method in MemRegion.cpp
static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *D = RT->getDecl();
    if (!D->getDefinition())
      return false;
  }

  return true;
}

StoreRef StoreManager::BindDefault(Store store, const MemRegion *R, SVal V) {
  return StoreRef(store, *this);
}

const ElementRegion *StoreManager::GetElementZeroRegion(const MemRegion *R,
                                                        QualType T) {
  NonLoc idx = svalBuilder.makeZeroArrayIndex();
  assert(!T.isNull());
  return MRMgr.getElementRegion(T, idx, R, Ctx);
}

const MemRegion *StoreManager::castRegion(const MemRegion *R,
                                          QualType CastToTy) {
  ASTContext &Ctx = StateMgr.getContext();

  // Handle casts to Objective-C objects.
  if (CastToTy->isObjCObjectPointerType())
    return R->StripCasts();

  if (CastToTy->isBlockPointerType()) {
    // FIXME: We may need different solutions, depending on the symbol
    // involved.  Blocks can be cast to/from 'id', as they can be treated
    // as Objective-C objects.  This could possibly be handled by enhancing
    // our reasoning about downcasts of symbolic objects.
    if (isa<CodeTextRegion>(R) || isa<SymbolicRegion>(R))
      return R;

    // We don't know what to make of it.  Return a NULL region, which
    // will be interpreted as UnknownVal.
    return NULL;
  }

  // Now assume we are casting from pointer to pointer.  Other cases should
  // already be handled.
  QualType PointeeTy = CastToTy->getPointeeType();
  QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);

  // Handle casts to void*.  We just pass the region through.
  if (CanonPointeeTy.getLocalUnqualifiedType() == Ctx.VoidTy)
    return R;

  // Handle casts from compatible types.
  if (R->isBoundable())
    if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
      QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
      if (CanonPointeeTy == ObjTy)
        return R;
    }

  // Process the region cast according to the kind of the region being cast.
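  // (Illustrative sketch, not from the original sources: for a hypothetical
  //  'short s;' whose address is cast to 'char *', the VarRegion for 's'
  //  falls into the second group of cases below, and the result is an
  //  ElementRegion of element type 'char' at index 0 layered on top of that
  //  VarRegion.)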
  switch (R->getKind()) {
  case MemRegion::CXXThisRegionKind:
  case MemRegion::GenericMemSpaceRegionKind:
  case MemRegion::StackLocalsSpaceRegionKind:
  case MemRegion::StackArgumentsSpaceRegionKind:
  case MemRegion::HeapSpaceRegionKind:
  case MemRegion::UnknownSpaceRegionKind:
  case MemRegion::NonStaticGlobalSpaceRegionKind:
  case MemRegion::StaticGlobalSpaceRegionKind: {
    assert(0 && "Invalid region cast");
    break;
  }

  case MemRegion::FunctionTextRegionKind:
  case MemRegion::BlockTextRegionKind:
  case MemRegion::BlockDataRegionKind:
  case MemRegion::StringRegionKind:
    // FIXME: Need to handle arbitrary downcasts.
  case MemRegion::SymbolicRegionKind:
  case MemRegion::AllocaRegionKind:
  case MemRegion::CompoundLiteralRegionKind:
  case MemRegion::FieldRegionKind:
  case MemRegion::ObjCIvarRegionKind:
  case MemRegion::VarRegionKind:
  case MemRegion::CXXTempObjectRegionKind:
  case MemRegion::CXXBaseObjectRegionKind:
    return MakeElementRegion(R, PointeeTy);

  case MemRegion::ElementRegionKind: {
    // If we are casting from an ElementRegion to another type, the
    // algorithm is as follows:
    //
    // (1) Compute the "raw offset" of the ElementRegion from the
    //     base region.  This is done by calling 'getAsRawOffset()'.
    //
    // (2a) If we get a 'RegionRawOffset' after calling
    //      'getAsRawOffset()', determine if the absolute offset
    //      can be exactly divided into chunks of the size of the
    //      casted-pointee type.  If so, create a new ElementRegion with
    //      the pointee-cast type as the new ElementType and the index
    //      being the offset divided by the chunk size.  If not, create
    //      a new ElementRegion at offset 0 off the raw offset region.
    //
    // (2b) If we don't get a 'RegionRawOffset' after calling
    //      'getAsRawOffset()', it means that we are at offset 0.
    //
    // FIXME: Handle symbolic raw offsets.

    const ElementRegion *elementR = cast<ElementRegion>(R);
    const RegionRawOffset &rawOff = elementR->getAsArrayOffset();
    const MemRegion *baseR = rawOff.getRegion();

    // If we cannot compute a raw offset, throw up our hands and return
    // a NULL MemRegion*.
    if (!baseR)
      return NULL;

    CharUnits off = rawOff.getOffset();

    if (off.isZero()) {
      // Edge case: we are at 0 bytes off the beginning of baseR.  Check
      // whether the type we are casting to is the same as that of the base
      // region.  If so, just return the base region.
      if (const TypedRegion *TR = dyn_cast<TypedRegion>(baseR)) {
        QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
        QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
        if (CanonPointeeTy == ObjTy)
          return baseR;
      }

      // Otherwise, create a new ElementRegion at offset 0.
      return MakeElementRegion(baseR, PointeeTy);
    }

    // We have a non-zero offset from the base region.  We want to determine
    // if the offset can be evenly divided by sizeof(PointeeTy).  If so,
    // we create an ElementRegion whose index is that value.  Otherwise, we
    // create two ElementRegions, one that reflects a raw offset and the
    // other that reflects the cast.

    // Compute the index for the new ElementRegion.
    int64_t newIndex = 0;
    const MemRegion *newSuperR = 0;

    // We can only compute sizeof(PointeeTy) if it is a complete type.
    if (IsCompleteType(Ctx, PointeeTy)) {
      // Compute the size in **bytes**.
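      // (Hypothetical example: with a raw offset of 8 bytes and a 4-byte
      //  'int' pointee, the divisibility check below yields newIndex == 2
      //  and newSuperR == baseR; a raw offset of 6 bytes is not a multiple
      //  of 4, so newSuperR stays null and the intermediate CharTy
      //  ElementRegion created further down is used instead.)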
      CharUnits pointeeTySize = Ctx.getTypeSizeInChars(PointeeTy);
      if (!pointeeTySize.isZero()) {
        // Is the offset a multiple of the size?  If so, we can layer the
        // ElementRegion (with elementType == PointeeTy) directly on top of
        // the base region.
        if (off % pointeeTySize == 0) {
          newIndex = off / pointeeTySize;
          newSuperR = baseR;
        }
      }
    }

    if (!newSuperR) {
      // Create an intermediate ElementRegion to represent the raw byte
      // offset.  This will be the super region of the final ElementRegion.
      newSuperR = MakeElementRegion(baseR, Ctx.CharTy, off.getQuantity());
    }

    return MakeElementRegion(newSuperR, PointeeTy, newIndex);
  }
  }

  assert(0 && "unreachable");
  return 0;
}

/// CastRetrievedVal - Used by subclasses of StoreManager to implement
/// implicit casts that arise from loads from regions that are reinterpreted
/// as another region.
SVal StoreManager::CastRetrievedVal(SVal V, const TypedRegion *R,
                                    QualType castTy, bool performTestOnly) {
  if (castTy.isNull())
    return V;

  ASTContext &Ctx = svalBuilder.getContext();

  if (performTestOnly) {
    // Automatically translate references to pointers.
    QualType T = R->getValueType();
    if (const ReferenceType *RT = T->getAs<ReferenceType>())
      T = Ctx.getPointerType(RT->getPointeeType());

    assert(svalBuilder.getContext().hasSameUnqualifiedType(castTy, T));
    return V;
  }

  if (const Loc *L = dyn_cast<Loc>(&V))
    return svalBuilder.evalCastFromLoc(*L, castTy);
  else if (const NonLoc *NL = dyn_cast<NonLoc>(&V))
    return svalBuilder.evalCastFromNonLoc(*NL, castTy);

  return V;
}

SVal StoreManager::getLValueFieldOrIvar(const Decl *D, SVal Base) {
  if (Base.isUnknownOrUndef())
    return Base;

  Loc BaseL = cast<Loc>(Base);
  const MemRegion *BaseR = 0;

  switch (BaseL.getSubKind()) {
  case loc::MemRegionKind:
    BaseR = cast<loc::MemRegionVal>(BaseL).getRegion();
    break;

  case loc::GotoLabelKind:
    // These are abnormal cases.  Flag an undefined value.
    return UndefinedVal();

  case loc::ConcreteIntKind:
    // While these seem funny, this can happen through casts.
    // FIXME: What we should return is the field offset.  For example,
    //  add the field offset to the integer value.  That way funny things
    //  like this work properly:  &(((struct foo *) 0xa)->f)
    return Base;

  default:
    assert(0 && "Unhandled Base.");
    return Base;
  }

  // NOTE: We must have this check first because ObjCIvarDecl is a subclass
  // of FieldDecl.
  if (const ObjCIvarDecl *ID = dyn_cast<ObjCIvarDecl>(D))
    return loc::MemRegionVal(MRMgr.getObjCIvarRegion(ID, BaseR));

  return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
}

SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
                                    SVal Base) {
  // If the base is an unknown or undefined value, just return it back.
  // FIXME: For absolute pointer addresses, we just return that value back as
  //  well, although in reality we should return the offset added to that
  //  value.
  if (Base.isUnknownOrUndef() || isa<loc::ConcreteInt>(Base))
    return Base;

  const MemRegion *BaseRegion = cast<loc::MemRegionVal>(Base).getRegion();

  // Pointers of any type can be cast and used as array bases.
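  // (Illustrative, hypothetical case: after 'int x; char *p = (char *)&x;',
  //  the value of 'p' refers to an ElementRegion of type 'char' at index 0
  //  over the VarRegion for 'x', produced by castRegion above, so the
  //  dyn_cast below succeeds and concrete indices are folded together.)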
  const ElementRegion *ElemR = dyn_cast<ElementRegion>(BaseRegion);

  // Convert the offset to the appropriate size and signedness.
  Offset = cast<NonLoc>(svalBuilder.convertToArrayIndex(Offset));

  if (!ElemR) {
    // If the base region is not an ElementRegion, create one.
    // This can happen in the following example:
    //
    //   char *p = __builtin_alloca(10);
    //   p[1] = 8;
    //
    // Observe that 'p' binds to an AllocaRegion.
    return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
                                                    BaseRegion, Ctx));
  }

  SVal BaseIdx = ElemR->getIndex();

  if (!isa<nonloc::ConcreteInt>(BaseIdx))
    return UnknownVal();

  const llvm::APSInt &BaseIdxI = cast<nonloc::ConcreteInt>(BaseIdx).getValue();

  // Only allow non-integer offsets if the base region has no offset itself.
  // FIXME: This is a somewhat arbitrary restriction.  We should be using
  // SValBuilder here to add the two offsets without checking their types.
  if (!isa<nonloc::ConcreteInt>(Offset)) {
    if (isa<ElementRegion>(BaseRegion->StripCasts()))
      return UnknownVal();

    return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
                                                    ElemR->getSuperRegion(),
                                                    Ctx));
  }

  const llvm::APSInt &OffI = cast<nonloc::ConcreteInt>(Offset).getValue();
  assert(BaseIdxI.isSigned());

  // Compute the new index.
  nonloc::ConcreteInt NewIdx(svalBuilder.getBasicValueFactory().getValue(BaseIdxI +
                                                                         OffI));

  // Construct the new ElementRegion.
  const MemRegion *ArrayR = ElemR->getSuperRegion();
  return loc::MemRegionVal(MRMgr.getElementRegion(elementType, NewIdx, ArrayR,
                                                  Ctx));
}
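// (Worked example for the concrete-offset path in getLValueElement, with
//  hypothetical values: if the base lvalue is an ElementRegion at index 3
//  over a region 'buf' and the converted Offset is the concrete index 2,
//  NewIdx is 5 and the result is an ElementRegion of 'elementType' at
//  index 5 directly over 'buf', rather than a second ElementRegion stacked
//  on the first.)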