/*--------------------------------------------------------------------*/
/*--- The address space manager: stuff common to all platforms    ---*/
/*---                                                              ---*/
/*---                                         m_aspacemgr-common.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2006-2010 OpenWorks LLP
      info (at) open-works.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* *************************************************************
   DO NOT INCLUDE ANY OTHER FILES HERE.
   ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
   AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
   ************************************************************* */

#include "priv_aspacemgr.h"
#include "config.h"


/*-----------------------------------------------------------------*/
/*---                                                           ---*/
/*--- Stuff to make aspacem almost completely independent of    ---*/
/*--- the rest of Valgrind.                                     ---*/
/*---                                                           ---*/
/*-----------------------------------------------------------------*/

//--------------------------------------------------------------
// Simple assert and assert-like fns, which avoid dependence on
// m_libcassert, and hence on the entire debug-info reader swamp

__attribute__ ((noreturn))
void ML_(am_exit)( Int status )
{
#  if defined(VGO_linux)
   (void)VG_(do_syscall1)(__NR_exit_group, status);
#  endif
   (void)VG_(do_syscall1)(__NR_exit, status);
   /* Why are we still alive here? */
   /*NOTREACHED*/
   *(volatile Int *)0 = 'x';
   aspacem_assert(2+2 == 5);
}

void ML_(am_barf) ( HChar* what )
{
   VG_(debugLog)(0, "aspacem", "Valgrind: FATAL: %s\n", what);
   VG_(debugLog)(0, "aspacem", "Exiting now.\n");
   ML_(am_exit)(1);
}

void ML_(am_barf_toolow) ( HChar* what )
{
   VG_(debugLog)(0, "aspacem",
                    "Valgrind: FATAL: %s is too low.\n", what);
   VG_(debugLog)(0, "aspacem", "  Increase it and rebuild.  "
                               "Exiting now.\n");
   ML_(am_exit)(1);
}

void ML_(am_assert_fail)( const HChar* expr,
                          const Char* file,
                          Int line,
                          const Char* fn )
{
   VG_(debugLog)(0, "aspacem",
                    "Valgrind: FATAL: aspacem assertion failed:\n");
   VG_(debugLog)(0, "aspacem", "  %s\n", expr);
   VG_(debugLog)(0, "aspacem", "  at %s:%d (%s)\n", file, line, fn);
   VG_(debugLog)(0, "aspacem", "Exiting now.\n");
   ML_(am_exit)(1);
}

Int ML_(am_getpid)( void )
{
   SysRes sres = VG_(do_syscall0)(__NR_getpid);
   aspacem_assert(!sr_isError(sres));
   return sr_Res(sres);
}


//--------------------------------------------------------------
// A simple sprintf implementation, so as to avoid dependence on
// m_libcprint.

static void local_add_to_aspacem_sprintf_buf ( HChar c, void *p )
{
   HChar** aspacem_sprintf_ptr = p;
   *(*aspacem_sprintf_ptr)++ = c;
}

static
UInt local_vsprintf ( HChar* buf, const HChar *format, va_list vargs )
{
   UInt ret;
   HChar* aspacem_sprintf_ptr = buf;

   ret = VG_(debugLog_vprintf)
            ( local_add_to_aspacem_sprintf_buf,
              &aspacem_sprintf_ptr, format, vargs );
   local_add_to_aspacem_sprintf_buf('\0', &aspacem_sprintf_ptr);

   return ret;
}

UInt ML_(am_sprintf) ( HChar* buf, const HChar *format, ... )
{
   UInt ret;
   va_list vargs;

   va_start(vargs, format);
   ret = local_vsprintf(buf, format, vargs);
   va_end(vargs);

   return ret;
}
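
/* Illustrative note (not part of the original file): ML_(am_sprintf)
   formats into a caller-supplied buffer with no bounds checking, so
   callers must size the buffer for the worst case themselves.  A
   typical use, assuming a hypothetical 'fd' variable, is

      HChar tmp[64];
      ML_(am_sprintf)(tmp, "/proc/self/fd/%d", fd);

   which is the pattern ML_(am_resolve_filename) below relies on. */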


//--------------------------------------------------------------
// Direct access to a handful of syscalls.  This avoids dependence on
// m_libc*.  THESE DO NOT UPDATE ANY aspacem-internal DATA
// STRUCTURES (SEGMENT LISTS).  DO NOT USE THEM UNLESS YOU KNOW WHAT
// YOU ARE DOING.

/* --- Pertaining to mappings --- */

/* Note: this is VG_, not ML_. */
SysRes VG_(am_do_mmap_NO_NOTIFY)( Addr start, SizeT length, UInt prot,
                                  UInt flags, Int fd, Off64T offset)
{
   SysRes res;
   aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
#  if defined(VGP_x86_linux) || defined(VGP_ppc32_linux) \
      || defined(VGP_arm_linux)
   /* mmap2 uses 4096 chunks even if actual page size is bigger. */
   aspacem_assert((offset % 4096) == 0);
   res = VG_(do_syscall6)(__NR_mmap2, (UWord)start, length,
                          prot, flags, fd, offset / 4096);
#  elif defined(VGP_amd64_linux) || defined(VGP_ppc64_linux) \
        || defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
                          prot, flags, fd, offset);
#  elif defined(VGP_x86_darwin)
   if (fd == 0 && (flags & VKI_MAP_ANONYMOUS)) {
      fd = -1;  // MAP_ANON with fd==0 is EINVAL
   }
   res = VG_(do_syscall7)(__NR_mmap, (UWord)start, length,
                          prot, flags, fd,
                          offset & 0xffffffff, offset >> 32);
#  elif defined(VGP_amd64_darwin)
   if (fd == 0 && (flags & VKI_MAP_ANONYMOUS)) {
      fd = -1;  // MAP_ANON with fd==0 is EINVAL
   }
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
                          prot, flags, (UInt)fd, offset);
#  else
#    error Unknown platform
#  endif
   return res;
}
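
/* Illustrative sketch (not from the original source): a caller that
   wants one anonymous, readable/writable page at a kernel-chosen
   address might do something like

      SysRes sres = VG_(am_do_mmap_NO_NOTIFY)(
                       0, VKI_PAGE_SIZE,
                       VKI_PROT_READ|VKI_PROT_WRITE,
                       VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
                       -1, 0 );
      if (!sr_isError(sres)) { Addr a = sr_Res(sres); ... }

   As the name and the warning above say, this leaves the segment
   list untouched, so it is only for callers that update aspacem's
   records themselves. */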

static
SysRes local_do_mprotect_NO_NOTIFY(Addr start, SizeT length, UInt prot)
{
   return VG_(do_syscall3)(__NR_mprotect, (UWord)start, length, prot );
}

SysRes ML_(am_do_munmap_NO_NOTIFY)(Addr start, SizeT length)
{
   return VG_(do_syscall2)(__NR_munmap, (UWord)start, length );
}

#if HAVE_MREMAP
/* The following are used only to implement mremap(). */

SysRes ML_(am_do_extend_mapping_NO_NOTIFY)(
          Addr  old_addr,
          SizeT old_len,
          SizeT new_len
       )
{
   /* Extend the mapping old_addr .. old_addr+old_len-1 to have length
      new_len, WITHOUT moving it.  If it can't be extended in place,
      fail. */
#  if defined(VGO_linux)
   return VG_(do_syscall5)(
             __NR_mremap,
             old_addr, old_len, new_len,
             0/*flags, meaning: must be at old_addr, else FAIL */,
             0/*new_addr, is ignored*/
          );
#  elif defined(VGO_aix5)
   ML_(am_barf)("ML_(am_do_extend_mapping_NO_NOTIFY) on AIX5");
   /* NOTREACHED, but gcc doesn't understand that */
   return VG_(mk_SysRes_Error)(0);
#  else
#    error Unknown OS
#  endif
}

SysRes ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)(
          Addr old_addr, Addr old_len,
          Addr new_addr, Addr new_len
       )
{
   /* Move the mapping old_addr .. old_addr+old_len-1 to the new
      location and with the new length.  Only needs to handle the case
      where the two areas do not overlap, neither length is zero, and
      all args are page aligned. */
#  if defined(VGO_linux)
   return VG_(do_syscall5)(
             __NR_mremap,
             old_addr, old_len, new_len,
             VKI_MREMAP_MAYMOVE|VKI_MREMAP_FIXED/*move-or-fail*/,
             new_addr
          );
#  elif defined(VGO_aix5)
   ML_(am_barf)("ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY) on AIX5");
   /* NOTREACHED, but gcc doesn't understand that */
   return VG_(mk_SysRes_Error)(0);
#  else
#    error Unknown OS
#  endif
}

#endif

/* --- Pertaining to files --- */

SysRes ML_(am_open) ( const Char* pathname, Int flags, Int mode )
{
   SysRes res = VG_(do_syscall3)(__NR_open, (UWord)pathname, flags, mode);
   return res;
}

Int ML_(am_read) ( Int fd, void* buf, Int count)
{
   SysRes res = VG_(do_syscall3)(__NR_read, fd, (UWord)buf, count);
   return sr_isError(res) ? -1 : sr_Res(res);
}

void ML_(am_close) ( Int fd )
{
   (void)VG_(do_syscall1)(__NR_close, fd);
}

Int ML_(am_readlink)(HChar* path, HChar* buf, UInt bufsiz)
{
   SysRes res;
   res = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz);
   return sr_isError(res) ? -1 : sr_Res(res);
}

Int ML_(am_fcntl) ( Int fd, Int cmd, Addr arg )
{
#  if defined(VGO_linux) || defined(VGO_aix5)
   SysRes res = VG_(do_syscall3)(__NR_fcntl, fd, cmd, arg);
#  elif defined(VGO_darwin)
   SysRes res = VG_(do_syscall3)(__NR_fcntl_nocancel, fd, cmd, arg);
#  else
#    error "Unknown OS"
#  endif
   return sr_isError(res) ? -1 : sr_Res(res);
}

/* Get the dev, inode and mode info for a file descriptor, if
   possible.  Returns True on success. */
Bool ML_(am_get_fd_d_i_m)( Int fd,
                           /*OUT*/ULong* dev,
                           /*OUT*/ULong* ino, /*OUT*/UInt* mode )
{
   SysRes          res;
   struct vki_stat buf;
#  if defined(VGO_linux) && defined(__NR_fstat64)
   /* Try fstat64 first as it can cope with minor and major device
      numbers outside the 0-255 range and it works properly for x86
      binaries on amd64 systems where fstat seems to be broken. */
   struct vki_stat64 buf64;
   res = VG_(do_syscall2)(__NR_fstat64, fd, (UWord)&buf64);
   if (!sr_isError(res)) {
      *dev  = (ULong)buf64.st_dev;
      *ino  = (ULong)buf64.st_ino;
      *mode = (UInt) buf64.st_mode;
      return True;
   }
#  endif
   res = VG_(do_syscall2)(__NR_fstat, fd, (UWord)&buf);
   if (!sr_isError(res)) {
      *dev  = (ULong)buf.st_dev;
      *ino  = (ULong)buf.st_ino;
      *mode = (UInt) buf.st_mode;
      return True;
   }
   return False;
}

Bool ML_(am_resolve_filename) ( Int fd, /*OUT*/HChar* buf, Int nbuf )
{
#  if defined(VGO_linux)
   Int i;
   HChar tmp[64];
   for (i = 0; i < nbuf; i++) buf[i] = 0;
   ML_(am_sprintf)(tmp, "/proc/self/fd/%d", fd);
   if (ML_(am_readlink)(tmp, buf, nbuf) > 0 && buf[0] == '/')
      return True;
   else
      return False;

#  elif defined(VGO_aix5)
   I_die_here; /* maybe just return False? */
   return False;

#  elif defined(VGO_darwin)
   HChar tmp[VKI_MAXPATHLEN+1];
   if (0 == ML_(am_fcntl)(fd, VKI_F_GETPATH, (UWord)tmp)) {
      if (nbuf > 0) {
         VG_(strncpy)( buf, tmp, nbuf < sizeof(tmp) ? nbuf : sizeof(tmp) );
         buf[nbuf-1] = 0;
      }
      if (tmp[0] == '/') return True;
   }
   return False;

#  else
#    error Unknown OS
#  endif
}
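
/* Illustrative sketch (not part of the original file): the helpers
   above compose in the obvious open/read/close pattern, e.g. reading
   the start of a file into a local buffer:

      HChar  data[256];
      SysRes sres = ML_(am_open)("/some/path", VKI_O_RDONLY, 0);
      if (!sr_isError(sres)) {
         Int fd = sr_Res(sres);
         Int n  = ML_(am_read)(fd, data, sizeof(data));
         ML_(am_close)(fd);
      }

   Here n is the byte count read, or -1 on error; VKI_O_RDONLY is the
   usual vki open flag and "/some/path" is just a placeholder. */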


/*-----------------------------------------------------------------*/
/*---                                                           ---*/
/*--- Manage stacks for Valgrind itself.                        ---*/
/*---                                                           ---*/
/*-----------------------------------------------------------------*/

/* Allocate and initialise a VgStack (anonymous valgrind space).
   Protect the stack active area and the guard areas appropriately.
   Returns NULL on failure, else the address of the bottom of the
   stack.  On success, also sets *initial_sp to what the stack pointer
   should be set to. */

VgStack* VG_(am_alloc_VgStack)( /*OUT*/Addr* initial_sp )
{
   Int      szB;
   SysRes   sres;
   VgStack* stack;
   UInt*    p;
   Int      i;

   /* Allocate the stack. */
   szB = VG_STACK_GUARD_SZB
         + VG_STACK_ACTIVE_SZB + VG_STACK_GUARD_SZB;

   sres = VG_(am_mmap_anon_float_valgrind)( szB );
   if (sr_isError(sres))
      return NULL;

   stack = (VgStack*)(AddrH)sr_Res(sres);

   aspacem_assert(VG_IS_PAGE_ALIGNED(szB));
   aspacem_assert(VG_IS_PAGE_ALIGNED(stack));

   /* Protect the guard areas. */
   sres = local_do_mprotect_NO_NOTIFY(
             (Addr) &stack[0],
             VG_STACK_GUARD_SZB, VKI_PROT_NONE
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)(
      (Addr) &stack->bytes[0],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE
   );

   sres = local_do_mprotect_NO_NOTIFY(
             (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
             VG_STACK_GUARD_SZB, VKI_PROT_NONE
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)(
      (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE
   );

   /* Looks good.  Fill the active area with junk so we can later
      tell how much got used. */

   p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_STACK_ACTIVE_SZB/sizeof(UInt); i++)
      p[i] = 0xDEADBEEF;

   *initial_sp = (Addr)&stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB];
   *initial_sp -= 8;
   *initial_sp &= ~((Addr)0x1F); /* 32-align it */

   VG_(debugLog)( 1, "aspacem",
                  "allocated thread stack at 0x%llx size %d\n",
                  (ULong)(Addr)stack, szB);
   ML_(am_do_sanity_check)();
   return stack;

  protect_failed:
   /* The stack was allocated, but we can't protect it.  Unmap it and
      return NULL (failure). */
   (void)ML_(am_do_munmap_NO_NOTIFY)( (Addr)stack, szB );
   ML_(am_do_sanity_check)();
   return NULL;
}
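
/* Illustrative sketch (not from the original source): a hypothetical
   caller allocating a stack for a Valgrind thread would do roughly

      Addr     sp;
      VgStack* stk = VG_(am_alloc_VgStack)( &sp );
      if (stk == NULL)
         ML_(am_barf)("out of memory for thread stacks");

   and then switch onto the new stack with the stack pointer set to
   sp, which lands just below the upper guard page, 32-byte aligned.
   The real call sites are elsewhere in the core, not in this file. */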

/* Figure out how many bytes of the stack's active area have not
   been used.  Used for estimating if we are close to overflowing it. */

SizeT VG_(am_get_VgStack_unused_szB)( VgStack* stack, SizeT limit )
{
   SizeT i;
   UInt* p;

   p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_STACK_ACTIVE_SZB/sizeof(UInt); i++) {
      if (p[i] != 0xDEADBEEF)
         break;
      if (i * sizeof(UInt) >= limit)
         break;
   }

   return i * sizeof(UInt);
}


/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/