//===-- sanitizer_posix.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements POSIX-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_POSIX

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"

#include <sys/mman.h>

#if SANITIZER_LINUX
#include <sys/utsname.h>
#endif

#if SANITIZER_LINUX && !SANITIZER_ANDROID
#include <sys/personality.h>
#endif

namespace __sanitizer {

// ------------- sanitizer_common.h
uptr GetMmapGranularity() {
  return GetPageSize();
}

#if SANITIZER_WORDSIZE == 32
// Take care of unusable kernel area in top gigabyte.
static uptr GetKernelAreaSize() {
#if SANITIZER_LINUX
  const uptr gbyte = 1UL << 30;

  // Firstly check if there are writable segments
  // mapped to top gigabyte (e.g. stack).
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr end, prot;
  while (proc_maps.Next(/*start*/0, &end,
                        /*offset*/0, /*filename*/0,
                        /*filename_size*/0, &prot)) {
    if ((end >= 3 * gbyte)
        && (prot & MemoryMappingLayout::kProtectionWrite) != 0)
      return 0;
  }

#if !SANITIZER_ANDROID
  // Even if nothing is mapped, top Gb may still be accessible
  // if we are running on 64-bit kernel.
  // Uname may report misleading results if personality type
  // is modified (e.g. under schroot) so check this as well.
  struct utsname uname_info;
  int pers = personality(0xffffffffUL);
  if (!(pers & PER_MASK)
      && uname(&uname_info) == 0
      && internal_strstr(uname_info.machine, "64"))
    return 0;
#endif  // SANITIZER_ANDROID

  // Top gigabyte is reserved for kernel.
  return gbyte;
#else
  return 0;
#endif  // SANITIZER_LINUX
}
#endif  // SANITIZER_WORDSIZE == 32

uptr GetMaxVirtualAddress() {
#if SANITIZER_WORDSIZE == 64
# if defined(__powerpc64__)
  // On PowerPC64 we have two different address space layouts: 44- and 46-bit.
  // We somehow need to figure out which one we are using now and choose
  // one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
  // Note that with 'ulimit -s unlimited' the stack is moved away from the top
  // of the address space, so simply checking the stack address is not enough.
  return (1ULL << 44) - 1;  // 0x00000fffffffffffUL
# elif defined(__aarch64__)
  return (1ULL << 39) - 1;
# else
  return (1ULL << 47) - 1;  // 0x00007fffffffffffUL;
# endif
#else  // SANITIZER_WORDSIZE == 32
  uptr res = (1ULL << 32) - 1;  // 0xffffffff;
  if (!common_flags()->full_address_space)
    res -= GetKernelAreaSize();
  CHECK_LT(reinterpret_cast<uptr>(&res), res);
  return res;
#endif  // SANITIZER_WORDSIZE
}

void *MmapOrDie(uptr size, const char *mem_type) {
  size = RoundUpTo(size, GetPageSizeCached());
  uptr res = internal_mmap(0, size,
                           PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANON, -1, 0);
  int reserrno;
  if (internal_iserror(res, &reserrno)) {
    static int recursion_count;
    if (recursion_count) {
      // The Report() and CHECK calls below may call mmap recursively and fail.
      // If we went into recursion, just die.
      RawWrite("ERROR: Failed to mmap\n");
      Die();
    }
    recursion_count++;
    Report("ERROR: %s failed to "
           "allocate 0x%zx (%zd) bytes of %s (errno: %d)\n",
           SanitizerToolName, size, size, mem_type, reserrno);
    DumpProcessMap();
    CHECK("unable to mmap" && 0);
  }
  IncreaseTotalMmap(size);
  return (void *)res;
}

void UnmapOrDie(void *addr, uptr size) {
  if (!addr || !size) return;
  uptr res = internal_munmap(addr, size);
  if (internal_iserror(res)) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }
  DecreaseTotalMmap(size);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  uptr PageSize = GetPageSizeCached();
  uptr p = internal_mmap(0,
                         RoundUpTo(size, PageSize),
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                         -1, 0);
  int reserrno;
  if (internal_iserror(p, &reserrno)) {
    Report("ERROR: %s failed to "
           "allocate noreserve 0x%zx (%zd) bytes for '%s' (errno: %d)\n",
           SanitizerToolName, size, size, mem_type, reserrno);
    CHECK("unable to mmap" && 0);
  }
  IncreaseTotalMmap(size);
  return (void *)p;
}

void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
  uptr PageSize = GetPageSizeCached();
  uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
                         RoundUpTo(size, PageSize),
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
                         -1, 0);
  int reserrno;
  if (internal_iserror(p, &reserrno))
    Report("ERROR: %s failed to "
           "allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n",
           SanitizerToolName, size, size, fixed_addr, reserrno);
  IncreaseTotalMmap(size);
  return (void *)p;
}

void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
  uptr PageSize = GetPageSizeCached();
  uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
                         RoundUpTo(size, PageSize),
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         -1, 0);
  int reserrno;
  if (internal_iserror(p, &reserrno)) {
    Report("ERROR: %s failed to "
           "allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n",
           SanitizerToolName, size, size, fixed_addr, reserrno);
    CHECK("unable to mmap" && 0);
  }
  IncreaseTotalMmap(size);
  return (void *)p;
}

void *Mprotect(uptr fixed_addr, uptr size) {
  return (void *)internal_mmap((void*)fixed_addr, size,
                               PROT_NONE,
                               MAP_PRIVATE | MAP_ANON | MAP_FIXED |
                               MAP_NORESERVE, -1, 0);
}

void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  uptr openrv = OpenFile(file_name, false);
  CHECK(!internal_iserror(openrv));
  fd_t fd = openrv;
  uptr fsize = internal_filesize(fd);
  CHECK_NE(fsize, (uptr)-1);
  CHECK_GT(fsize, 0);
  *buff_size = RoundUpTo(fsize, GetPageSizeCached());
  uptr map = internal_mmap(0, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
  return internal_iserror(map) ? 0 : (void *)map;
}

void *MapWritableFileToMemory(void *addr, uptr size, uptr fd, uptr offset) {
  uptr flags = MAP_SHARED;
  if (addr) flags |= MAP_FIXED;
  uptr p = internal_mmap(addr, size, PROT_READ | PROT_WRITE, flags, fd, offset);
  if (internal_iserror(p)) {
    Printf("could not map writable file (%zd, %zu, %zu): %zd\n", fd, offset,
           size, p);
    return 0;
  }
  return (void *)p;
}

static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
                                        uptr start2, uptr end2) {
  CHECK(start1 <= end1);
  CHECK(start2 <= end2);
  return (end1 < start2) || (end2 < start1);
}

// FIXME: this is thread-unsafe, but should not cause problems most of the time.
// When the shadow is mapped only a single thread usually exists (plus maybe
// several worker threads on Mac, which aren't expected to map big chunks of
// memory).
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr start, end;
  while (proc_maps.Next(&start, &end,
                        /*offset*/0, /*filename*/0, /*filename_size*/0,
                        /*protection*/0)) {
    if (!IntervalsAreSeparate(start, end, range_start, range_end))
      return false;
  }
  return true;
}

void DumpProcessMap() {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr start, end;
  const sptr kBufSize = 4095;
  char *filename = (char*)MmapOrDie(kBufSize, __func__);
  Report("Process memory map follows:\n");
  while (proc_maps.Next(&start, &end, /* file_offset */0,
                        filename, kBufSize, /* protection */0)) {
    Printf("\t%p-%p\t%s\n", (void*)start, (void*)end, filename);
  }
  Report("End of process memory map.\n");
  UnmapOrDie(filename, kBufSize);
}

const char *GetPwd() {
  return GetEnv("PWD");
}

char *FindPathToBinary(const char *name) {
  const char *path = GetEnv("PATH");
  if (!path)
    return 0;
  uptr name_len = internal_strlen(name);
  InternalScopedBuffer<char> buffer(kMaxPathLength);
  const char *beg = path;
  while (true) {
    const char *end = internal_strchrnul(beg, ':');
    uptr prefix_len = end - beg;
    if (prefix_len + name_len + 2 <= kMaxPathLength) {
      internal_memcpy(buffer.data(), beg, prefix_len);
      buffer[prefix_len] = '/';
      internal_memcpy(&buffer[prefix_len + 1], name, name_len);
      buffer[prefix_len + 1 + name_len] = '\0';
      if (FileExists(buffer.data()))
        return internal_strdup(buffer.data());
    }
    if (*end == '\0') break;
    beg = end + 1;
  }
  return 0;
}

void MaybeOpenReportFile() {
  if (!log_to_file) return;
  uptr pid = internal_getpid();
  // If in tracer, use the parent's file.
  if (pid == stoptheworld_tracer_pid)
    pid = stoptheworld_tracer_ppid;
  if (report_fd_pid == pid) return;
  InternalScopedBuffer<char> report_path_full(4096);
  internal_snprintf(report_path_full.data(), report_path_full.size(),
                    "%s.%zu", report_path_prefix, pid);
  uptr openrv = OpenFile(report_path_full.data(), true);
  if (internal_iserror(openrv)) {
    report_fd = kStderrFd;
    log_to_file = false;
    Report("ERROR: Can't open file: %s\n", report_path_full.data());
    Die();
  }
  if (report_fd != kInvalidFd) {
    // We're in the child. Close the parent's log.
    internal_close(report_fd);
  }
  report_fd = openrv;
  report_fd_pid = pid;
}

void RawWrite(const char *buffer) {
  static const char *kRawWriteError =
      "RawWrite can't output requested buffer!\n";
  uptr length = (uptr)internal_strlen(buffer);
  MaybeOpenReportFile();
  if (length != internal_write(report_fd, buffer, length)) {
    internal_write(report_fd, kRawWriteError, internal_strlen(kRawWriteError));
    Die();
  }
}

bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
  uptr s, e, off, prot;
  InternalScopedString buff(4096);
  MemoryMappingLayout proc_maps(/*cache_enabled*/false);
  while (proc_maps.Next(&s, &e, &off, buff.data(), buff.size(), &prot)) {
    if ((prot & MemoryMappingLayout::kProtectionExecute) != 0
        && internal_strcmp(module, buff.data()) == 0) {
      *start = s;
      *end = e;
      return true;
    }
  }
  return false;
}

}  // namespace __sanitizer

#endif  // SANITIZER_POSIX