/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE 1
#include <dirent.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

#include <algorithm>
#include <list>
#include <memory>
#include <string>
#include <vector>

#include <backtrace/Backtrace.h>
#include <backtrace/BacktraceMap.h>

#include <android-base/stringprintf.h>
#include <cutils/atomic.h>
#include <cutils/threads.h>

#include <gtest/gtest.h>

// For the THREAD_SIGNAL definition.
#include "BacktraceCurrent.h"
#include "thread_utils.h"

// Number of microseconds per milliseconds.
#define US_PER_MSEC             1000

// Number of nanoseconds in a second.
#define NS_PER_SEC              1000000000ULL

// Number of simultaneous dumping operations to perform.
#define NUM_THREADS  40

// Number of simultaneous threads running in our forked process.
65 #define NUM_PTRACE_THREADS 5 66 67 struct thread_t { 68 pid_t tid; 69 int32_t state; 70 pthread_t threadId; 71 void* data; 72 }; 73 74 struct dump_thread_t { 75 thread_t thread; 76 Backtrace* backtrace; 77 int32_t* now; 78 int32_t done; 79 }; 80 81 extern "C" { 82 // Prototypes for functions in the test library. 83 int test_level_one(int, int, int, int, void (*)(void*), void*); 84 85 int test_recursive_call(int, void (*)(void*), void*); 86 } 87 88 uint64_t NanoTime() { 89 struct timespec t = { 0, 0 }; 90 clock_gettime(CLOCK_MONOTONIC, &t); 91 return static_cast<uint64_t>(t.tv_sec * NS_PER_SEC + t.tv_nsec); 92 } 93 94 std::string DumpFrames(Backtrace* backtrace) { 95 if (backtrace->NumFrames() == 0) { 96 return " No frames to dump.\n"; 97 } 98 99 std::string frame; 100 for (size_t i = 0; i < backtrace->NumFrames(); i++) { 101 frame += " " + backtrace->FormatFrameData(i) + '\n'; 102 } 103 return frame; 104 } 105 106 void WaitForStop(pid_t pid) { 107 uint64_t start = NanoTime(); 108 109 siginfo_t si; 110 while (ptrace(PTRACE_GETSIGINFO, pid, 0, &si) < 0 && (errno == EINTR || errno == ESRCH)) { 111 if ((NanoTime() - start) > NS_PER_SEC) { 112 printf("The process did not get to a stopping point in 1 second.\n"); 113 break; 114 } 115 usleep(US_PER_MSEC); 116 } 117 } 118 119 bool ReadyLevelBacktrace(Backtrace* backtrace) { 120 // See if test_level_four is in the backtrace. 121 bool found = false; 122 for (Backtrace::const_iterator it = backtrace->begin(); it != backtrace->end(); ++it) { 123 if (it->func_name == "test_level_four") { 124 found = true; 125 break; 126 } 127 } 128 129 return found; 130 } 131 132 void VerifyLevelDump(Backtrace* backtrace) { 133 ASSERT_GT(backtrace->NumFrames(), static_cast<size_t>(0)) 134 << DumpFrames(backtrace); 135 ASSERT_LT(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES)) 136 << DumpFrames(backtrace); 137 138 // Look through the frames starting at the highest to find the 139 // frame we want. 
140 size_t frame_num = 0; 141 for (size_t i = backtrace->NumFrames()-1; i > 2; i--) { 142 if (backtrace->GetFrame(i)->func_name == "test_level_one") { 143 frame_num = i; 144 break; 145 } 146 } 147 ASSERT_LT(static_cast<size_t>(0), frame_num) << DumpFrames(backtrace); 148 ASSERT_LE(static_cast<size_t>(3), frame_num) << DumpFrames(backtrace); 149 150 ASSERT_EQ(backtrace->GetFrame(frame_num)->func_name, "test_level_one") 151 << DumpFrames(backtrace); 152 ASSERT_EQ(backtrace->GetFrame(frame_num-1)->func_name, "test_level_two") 153 << DumpFrames(backtrace); 154 ASSERT_EQ(backtrace->GetFrame(frame_num-2)->func_name, "test_level_three") 155 << DumpFrames(backtrace); 156 ASSERT_EQ(backtrace->GetFrame(frame_num-3)->func_name, "test_level_four") 157 << DumpFrames(backtrace); 158 } 159 160 void VerifyLevelBacktrace(void*) { 161 std::unique_ptr<Backtrace> backtrace( 162 Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD)); 163 ASSERT_TRUE(backtrace.get() != nullptr); 164 ASSERT_TRUE(backtrace->Unwind(0)); 165 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError()); 166 167 VerifyLevelDump(backtrace.get()); 168 } 169 170 bool ReadyMaxBacktrace(Backtrace* backtrace) { 171 return (backtrace->NumFrames() == MAX_BACKTRACE_FRAMES); 172 } 173 174 void VerifyMaxDump(Backtrace* backtrace) { 175 ASSERT_EQ(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES)) 176 << DumpFrames(backtrace); 177 // Verify that the last frame is our recursive call. 
178 ASSERT_EQ(backtrace->GetFrame(MAX_BACKTRACE_FRAMES-1)->func_name, "test_recursive_call") 179 << DumpFrames(backtrace); 180 } 181 182 void VerifyMaxBacktrace(void*) { 183 std::unique_ptr<Backtrace> backtrace( 184 Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD)); 185 ASSERT_TRUE(backtrace.get() != nullptr); 186 ASSERT_TRUE(backtrace->Unwind(0)); 187 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError()); 188 189 VerifyMaxDump(backtrace.get()); 190 } 191 192 void ThreadSetState(void* data) { 193 thread_t* thread = reinterpret_cast<thread_t*>(data); 194 android_atomic_acquire_store(1, &thread->state); 195 volatile int i = 0; 196 while (thread->state) { 197 i++; 198 } 199 } 200 201 void VerifyThreadTest(pid_t tid, void (*VerifyFunc)(Backtrace*)) { 202 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), tid)); 203 ASSERT_TRUE(backtrace.get() != nullptr); 204 ASSERT_TRUE(backtrace->Unwind(0)); 205 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError()); 206 207 VerifyFunc(backtrace.get()); 208 } 209 210 bool WaitForNonZero(int32_t* value, uint64_t seconds) { 211 uint64_t start = NanoTime(); 212 do { 213 if (android_atomic_acquire_load(value)) { 214 return true; 215 } 216 } while ((NanoTime() - start) < seconds * NS_PER_SEC); 217 return false; 218 } 219 220 TEST(libbacktrace, local_no_unwind_frames) { 221 // Verify that a local unwind does not include any frames within 222 // libunwind or libbacktrace. 
223 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), getpid())); 224 ASSERT_TRUE(backtrace.get() != nullptr); 225 ASSERT_TRUE(backtrace->Unwind(0)); 226 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError()); 227 228 ASSERT_TRUE(backtrace->NumFrames() != 0); 229 for (const auto& frame : *backtrace ) { 230 if (BacktraceMap::IsValid(frame.map)) { 231 const std::string name = basename(frame.map.name.c_str()); 232 ASSERT_TRUE(name != "libunwind.so" && name != "libbacktrace.so") 233 << DumpFrames(backtrace.get()); 234 } 235 break; 236 } 237 } 238 239 TEST(libbacktrace, local_trace) { 240 ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelBacktrace, nullptr), 0); 241 } 242 243 void VerifyIgnoreFrames( 244 Backtrace* bt_all, Backtrace* bt_ign1, 245 Backtrace* bt_ign2, const char* cur_proc) { 246 EXPECT_EQ(bt_all->NumFrames(), bt_ign1->NumFrames() + 1) 247 << "All backtrace:\n" << DumpFrames(bt_all) << "Ignore 1 backtrace:\n" << DumpFrames(bt_ign1); 248 EXPECT_EQ(bt_all->NumFrames(), bt_ign2->NumFrames() + 2) 249 << "All backtrace:\n" << DumpFrames(bt_all) << "Ignore 2 backtrace:\n" << DumpFrames(bt_ign2); 250 251 // Check all of the frames are the same > the current frame. 
252 bool check = (cur_proc == nullptr); 253 for (size_t i = 0; i < bt_ign2->NumFrames(); i++) { 254 if (check) { 255 EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_ign1->GetFrame(i+1)->pc); 256 EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_ign1->GetFrame(i+1)->sp); 257 EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_ign1->GetFrame(i+1)->stack_size); 258 259 EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_all->GetFrame(i+2)->pc); 260 EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_all->GetFrame(i+2)->sp); 261 EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_all->GetFrame(i+2)->stack_size); 262 } 263 if (!check && bt_ign2->GetFrame(i)->func_name == cur_proc) { 264 check = true; 265 } 266 } 267 } 268 269 void VerifyLevelIgnoreFrames(void*) { 270 std::unique_ptr<Backtrace> all( 271 Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD)); 272 ASSERT_TRUE(all.get() != nullptr); 273 ASSERT_TRUE(all->Unwind(0)); 274 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, all->GetError()); 275 276 std::unique_ptr<Backtrace> ign1( 277 Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD)); 278 ASSERT_TRUE(ign1.get() != nullptr); 279 ASSERT_TRUE(ign1->Unwind(1)); 280 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign1->GetError()); 281 282 std::unique_ptr<Backtrace> ign2( 283 Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD)); 284 ASSERT_TRUE(ign2.get() != nullptr); 285 ASSERT_TRUE(ign2->Unwind(2)); 286 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign2->GetError()); 287 288 VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), "VerifyLevelIgnoreFrames"); 289 } 290 291 TEST(libbacktrace, local_trace_ignore_frames) { 292 ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelIgnoreFrames, nullptr), 0); 293 } 294 295 TEST(libbacktrace, local_max_trace) { 296 ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxBacktrace, nullptr), 0); 297 } 298 299 void VerifyProcTest(pid_t pid, pid_t tid, bool share_map, 300 bool (*ReadyFunc)(Backtrace*), 301 void (*VerifyFunc)(Backtrace*)) { 302 pid_t 
ptrace_tid; 303 if (tid < 0) { 304 ptrace_tid = pid; 305 } else { 306 ptrace_tid = tid; 307 } 308 uint64_t start = NanoTime(); 309 bool verified = false; 310 std::string last_dump; 311 do { 312 usleep(US_PER_MSEC); 313 if (ptrace(PTRACE_ATTACH, ptrace_tid, 0, 0) == 0) { 314 // Wait for the process to get to a stopping point. 315 WaitForStop(ptrace_tid); 316 317 std::unique_ptr<BacktraceMap> map; 318 if (share_map) { 319 map.reset(BacktraceMap::Create(pid)); 320 } 321 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, tid, map.get())); 322 ASSERT_TRUE(backtrace.get() != nullptr); 323 ASSERT_TRUE(backtrace->Unwind(0)); 324 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError()); 325 if (ReadyFunc(backtrace.get())) { 326 VerifyFunc(backtrace.get()); 327 verified = true; 328 } else { 329 last_dump = DumpFrames(backtrace.get()); 330 } 331 332 ASSERT_TRUE(ptrace(PTRACE_DETACH, ptrace_tid, 0, 0) == 0); 333 } 334 // If 5 seconds have passed, then we are done. 335 } while (!verified && (NanoTime() - start) <= 5 * NS_PER_SEC); 336 ASSERT_TRUE(verified) << "Last backtrace:\n" << last_dump; 337 } 338 339 TEST(libbacktrace, ptrace_trace) { 340 pid_t pid; 341 if ((pid = fork()) == 0) { 342 ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0); 343 _exit(1); 344 } 345 VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyLevelBacktrace, VerifyLevelDump); 346 347 kill(pid, SIGKILL); 348 int status; 349 ASSERT_EQ(waitpid(pid, &status, 0), pid); 350 } 351 352 TEST(libbacktrace, ptrace_trace_shared_map) { 353 pid_t pid; 354 if ((pid = fork()) == 0) { 355 ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0); 356 _exit(1); 357 } 358 359 VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, true, ReadyLevelBacktrace, VerifyLevelDump); 360 361 kill(pid, SIGKILL); 362 int status; 363 ASSERT_EQ(waitpid(pid, &status, 0), pid); 364 } 365 366 TEST(libbacktrace, ptrace_max_trace) { 367 pid_t pid; 368 if ((pid = fork()) == 0) { 369 
ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, nullptr, nullptr), 0); 370 _exit(1); 371 } 372 VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyMaxBacktrace, VerifyMaxDump); 373 374 kill(pid, SIGKILL); 375 int status; 376 ASSERT_EQ(waitpid(pid, &status, 0), pid); 377 } 378 379 void VerifyProcessIgnoreFrames(Backtrace* bt_all) { 380 std::unique_ptr<Backtrace> ign1(Backtrace::Create(bt_all->Pid(), BACKTRACE_CURRENT_THREAD)); 381 ASSERT_TRUE(ign1.get() != nullptr); 382 ASSERT_TRUE(ign1->Unwind(1)); 383 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign1->GetError()); 384 385 std::unique_ptr<Backtrace> ign2(Backtrace::Create(bt_all->Pid(), BACKTRACE_CURRENT_THREAD)); 386 ASSERT_TRUE(ign2.get() != nullptr); 387 ASSERT_TRUE(ign2->Unwind(2)); 388 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign2->GetError()); 389 390 VerifyIgnoreFrames(bt_all, ign1.get(), ign2.get(), nullptr); 391 } 392 393 TEST(libbacktrace, ptrace_ignore_frames) { 394 pid_t pid; 395 if ((pid = fork()) == 0) { 396 ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0); 397 _exit(1); 398 } 399 VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyLevelBacktrace, VerifyProcessIgnoreFrames); 400 401 kill(pid, SIGKILL); 402 int status; 403 ASSERT_EQ(waitpid(pid, &status, 0), pid); 404 } 405 406 // Create a process with multiple threads and dump all of the threads. 407 void* PtraceThreadLevelRun(void*) { 408 EXPECT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0); 409 return nullptr; 410 } 411 412 void GetThreads(pid_t pid, std::vector<pid_t>* threads) { 413 // Get the list of tasks. 
414 char task_path[128]; 415 snprintf(task_path, sizeof(task_path), "/proc/%d/task", pid); 416 417 DIR* tasks_dir = opendir(task_path); 418 ASSERT_TRUE(tasks_dir != nullptr); 419 struct dirent* entry; 420 while ((entry = readdir(tasks_dir)) != nullptr) { 421 char* end; 422 pid_t tid = strtoul(entry->d_name, &end, 10); 423 if (*end == '\0') { 424 threads->push_back(tid); 425 } 426 } 427 closedir(tasks_dir); 428 } 429 430 TEST(libbacktrace, ptrace_threads) { 431 pid_t pid; 432 if ((pid = fork()) == 0) { 433 for (size_t i = 0; i < NUM_PTRACE_THREADS; i++) { 434 pthread_attr_t attr; 435 pthread_attr_init(&attr); 436 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 437 438 pthread_t thread; 439 ASSERT_TRUE(pthread_create(&thread, &attr, PtraceThreadLevelRun, nullptr) == 0); 440 } 441 ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0); 442 _exit(1); 443 } 444 445 // Check to see that all of the threads are running before unwinding. 446 std::vector<pid_t> threads; 447 uint64_t start = NanoTime(); 448 do { 449 usleep(US_PER_MSEC); 450 threads.clear(); 451 GetThreads(pid, &threads); 452 } while ((threads.size() != NUM_PTRACE_THREADS + 1) && 453 ((NanoTime() - start) <= 5 * NS_PER_SEC)); 454 ASSERT_EQ(threads.size(), static_cast<size_t>(NUM_PTRACE_THREADS + 1)); 455 456 ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0); 457 WaitForStop(pid); 458 for (std::vector<int>::const_iterator it = threads.begin(); it != threads.end(); ++it) { 459 // Skip the current forked process, we only care about the threads. 
460 if (pid == *it) { 461 continue; 462 } 463 VerifyProcTest(pid, *it, false, ReadyLevelBacktrace, VerifyLevelDump); 464 } 465 ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0); 466 467 kill(pid, SIGKILL); 468 int status; 469 ASSERT_EQ(waitpid(pid, &status, 0), pid); 470 } 471 472 void VerifyLevelThread(void*) { 473 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), gettid())); 474 ASSERT_TRUE(backtrace.get() != nullptr); 475 ASSERT_TRUE(backtrace->Unwind(0)); 476 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError()); 477 478 VerifyLevelDump(backtrace.get()); 479 } 480 481 TEST(libbacktrace, thread_current_level) { 482 ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelThread, nullptr), 0); 483 } 484 485 void VerifyMaxThread(void*) { 486 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), gettid())); 487 ASSERT_TRUE(backtrace.get() != nullptr); 488 ASSERT_TRUE(backtrace->Unwind(0)); 489 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError()); 490 491 VerifyMaxDump(backtrace.get()); 492 } 493 494 TEST(libbacktrace, thread_current_max) { 495 ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxThread, nullptr), 0); 496 } 497 498 void* ThreadLevelRun(void* data) { 499 thread_t* thread = reinterpret_cast<thread_t*>(data); 500 501 thread->tid = gettid(); 502 EXPECT_NE(test_level_one(1, 2, 3, 4, ThreadSetState, data), 0); 503 return nullptr; 504 } 505 506 TEST(libbacktrace, thread_level_trace) { 507 pthread_attr_t attr; 508 pthread_attr_init(&attr); 509 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 510 511 thread_t thread_data = { 0, 0, 0, nullptr }; 512 pthread_t thread; 513 ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0); 514 515 // Wait up to 2 seconds for the tid to be set. 516 ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2)); 517 518 // Make sure that the thread signal used is not visible when compiled for 519 // the target. 
520 #if !defined(__GLIBC__) 521 ASSERT_LT(THREAD_SIGNAL, SIGRTMIN); 522 #endif 523 524 // Save the current signal action and make sure it is restored afterwards. 525 struct sigaction cur_action; 526 ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &cur_action) == 0); 527 528 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid)); 529 ASSERT_TRUE(backtrace.get() != nullptr); 530 ASSERT_TRUE(backtrace->Unwind(0)); 531 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError()); 532 533 VerifyLevelDump(backtrace.get()); 534 535 // Tell the thread to exit its infinite loop. 536 android_atomic_acquire_store(0, &thread_data.state); 537 538 // Verify that the old action was restored. 539 struct sigaction new_action; 540 ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &new_action) == 0); 541 EXPECT_EQ(cur_action.sa_sigaction, new_action.sa_sigaction); 542 // The SA_RESTORER flag gets set behind our back, so a direct comparison 543 // doesn't work unless we mask the value off. Mips doesn't have this 544 // flag, so skip this on that platform. 545 #if defined(SA_RESTORER) 546 cur_action.sa_flags &= ~SA_RESTORER; 547 new_action.sa_flags &= ~SA_RESTORER; 548 #elif defined(__GLIBC__) 549 // Our host compiler doesn't appear to define this flag for some reason. 550 cur_action.sa_flags &= ~0x04000000; 551 new_action.sa_flags &= ~0x04000000; 552 #endif 553 EXPECT_EQ(cur_action.sa_flags, new_action.sa_flags); 554 } 555 556 TEST(libbacktrace, thread_ignore_frames) { 557 pthread_attr_t attr; 558 pthread_attr_init(&attr); 559 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 560 561 thread_t thread_data = { 0, 0, 0, nullptr }; 562 pthread_t thread; 563 ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0); 564 565 // Wait up to 2 seconds for the tid to be set. 
566 ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2)); 567 568 std::unique_ptr<Backtrace> all(Backtrace::Create(getpid(), thread_data.tid)); 569 ASSERT_TRUE(all.get() != nullptr); 570 ASSERT_TRUE(all->Unwind(0)); 571 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, all->GetError()); 572 573 std::unique_ptr<Backtrace> ign1(Backtrace::Create(getpid(), thread_data.tid)); 574 ASSERT_TRUE(ign1.get() != nullptr); 575 ASSERT_TRUE(ign1->Unwind(1)); 576 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign1->GetError()); 577 578 std::unique_ptr<Backtrace> ign2(Backtrace::Create(getpid(), thread_data.tid)); 579 ASSERT_TRUE(ign2.get() != nullptr); 580 ASSERT_TRUE(ign2->Unwind(2)); 581 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign2->GetError()); 582 583 VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), nullptr); 584 585 // Tell the thread to exit its infinite loop. 586 android_atomic_acquire_store(0, &thread_data.state); 587 } 588 589 void* ThreadMaxRun(void* data) { 590 thread_t* thread = reinterpret_cast<thread_t*>(data); 591 592 thread->tid = gettid(); 593 EXPECT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, ThreadSetState, data), 0); 594 return nullptr; 595 } 596 597 TEST(libbacktrace, thread_max_trace) { 598 pthread_attr_t attr; 599 pthread_attr_init(&attr); 600 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 601 602 thread_t thread_data = { 0, 0, 0, nullptr }; 603 pthread_t thread; 604 ASSERT_TRUE(pthread_create(&thread, &attr, ThreadMaxRun, &thread_data) == 0); 605 606 // Wait for the tid to be set. 607 ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2)); 608 609 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid)); 610 ASSERT_TRUE(backtrace.get() != nullptr); 611 ASSERT_TRUE(backtrace->Unwind(0)); 612 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError()); 613 614 VerifyMaxDump(backtrace.get()); 615 616 // Tell the thread to exit its infinite loop. 
617 android_atomic_acquire_store(0, &thread_data.state); 618 } 619 620 void* ThreadDump(void* data) { 621 dump_thread_t* dump = reinterpret_cast<dump_thread_t*>(data); 622 while (true) { 623 if (android_atomic_acquire_load(dump->now)) { 624 break; 625 } 626 } 627 628 // The status of the actual unwind will be checked elsewhere. 629 dump->backtrace = Backtrace::Create(getpid(), dump->thread.tid); 630 dump->backtrace->Unwind(0); 631 632 android_atomic_acquire_store(1, &dump->done); 633 634 return nullptr; 635 } 636 637 TEST(libbacktrace, thread_multiple_dump) { 638 // Dump NUM_THREADS simultaneously. 639 std::vector<thread_t> runners(NUM_THREADS); 640 std::vector<dump_thread_t> dumpers(NUM_THREADS); 641 642 pthread_attr_t attr; 643 pthread_attr_init(&attr); 644 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 645 for (size_t i = 0; i < NUM_THREADS; i++) { 646 // Launch the runners, they will spin in hard loops doing nothing. 647 runners[i].tid = 0; 648 runners[i].state = 0; 649 ASSERT_TRUE(pthread_create(&runners[i].threadId, &attr, ThreadMaxRun, &runners[i]) == 0); 650 } 651 652 // Wait for tids to be set. 653 for (std::vector<thread_t>::iterator it = runners.begin(); it != runners.end(); ++it) { 654 ASSERT_TRUE(WaitForNonZero(&it->state, 30)); 655 } 656 657 // Start all of the dumpers at once, they will spin until they are signalled 658 // to begin their dump run. 659 int32_t dump_now = 0; 660 for (size_t i = 0; i < NUM_THREADS; i++) { 661 dumpers[i].thread.tid = runners[i].tid; 662 dumpers[i].thread.state = 0; 663 dumpers[i].done = 0; 664 dumpers[i].now = &dump_now; 665 666 ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0); 667 } 668 669 // Start all of the dumpers going at once. 670 android_atomic_acquire_store(1, &dump_now); 671 672 for (size_t i = 0; i < NUM_THREADS; i++) { 673 ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 30)); 674 675 // Tell the runner thread to exit its infinite loop. 
676 android_atomic_acquire_store(0, &runners[i].state); 677 678 ASSERT_TRUE(dumpers[i].backtrace != nullptr); 679 VerifyMaxDump(dumpers[i].backtrace); 680 681 delete dumpers[i].backtrace; 682 dumpers[i].backtrace = nullptr; 683 } 684 } 685 686 TEST(libbacktrace, thread_multiple_dump_same_thread) { 687 pthread_attr_t attr; 688 pthread_attr_init(&attr); 689 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 690 thread_t runner; 691 runner.tid = 0; 692 runner.state = 0; 693 ASSERT_TRUE(pthread_create(&runner.threadId, &attr, ThreadMaxRun, &runner) == 0); 694 695 // Wait for tids to be set. 696 ASSERT_TRUE(WaitForNonZero(&runner.state, 30)); 697 698 // Start all of the dumpers at once, they will spin until they are signalled 699 // to begin their dump run. 700 int32_t dump_now = 0; 701 // Dump the same thread NUM_THREADS simultaneously. 702 std::vector<dump_thread_t> dumpers(NUM_THREADS); 703 for (size_t i = 0; i < NUM_THREADS; i++) { 704 dumpers[i].thread.tid = runner.tid; 705 dumpers[i].thread.state = 0; 706 dumpers[i].done = 0; 707 dumpers[i].now = &dump_now; 708 709 ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0); 710 } 711 712 // Start all of the dumpers going at once. 713 android_atomic_acquire_store(1, &dump_now); 714 715 for (size_t i = 0; i < NUM_THREADS; i++) { 716 ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 30)); 717 718 ASSERT_TRUE(dumpers[i].backtrace != nullptr); 719 VerifyMaxDump(dumpers[i].backtrace); 720 721 delete dumpers[i].backtrace; 722 dumpers[i].backtrace = nullptr; 723 } 724 725 // Tell the runner thread to exit its infinite loop. 726 android_atomic_acquire_store(0, &runner.state); 727 } 728 729 // This test is for UnwindMaps that should share the same map cursor when 730 // multiple maps are created for the current process at the same time. 
731 TEST(libbacktrace, simultaneous_maps) { 732 BacktraceMap* map1 = BacktraceMap::Create(getpid()); 733 BacktraceMap* map2 = BacktraceMap::Create(getpid()); 734 BacktraceMap* map3 = BacktraceMap::Create(getpid()); 735 736 Backtrace* back1 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map1); 737 ASSERT_TRUE(back1 != nullptr); 738 EXPECT_TRUE(back1->Unwind(0)); 739 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, back1->GetError()); 740 delete back1; 741 delete map1; 742 743 Backtrace* back2 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map2); 744 ASSERT_TRUE(back2 != nullptr); 745 EXPECT_TRUE(back2->Unwind(0)); 746 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, back2->GetError()); 747 delete back2; 748 delete map2; 749 750 Backtrace* back3 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map3); 751 ASSERT_TRUE(back3 != nullptr); 752 EXPECT_TRUE(back3->Unwind(0)); 753 ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, back3->GetError()); 754 delete back3; 755 delete map3; 756 } 757 758 TEST(libbacktrace, fillin_erases) { 759 BacktraceMap* back_map = BacktraceMap::Create(getpid()); 760 761 backtrace_map_t map; 762 763 map.start = 1; 764 map.end = 3; 765 map.flags = 1; 766 map.name = "Initialized"; 767 back_map->FillIn(0, &map); 768 delete back_map; 769 770 ASSERT_FALSE(BacktraceMap::IsValid(map)); 771 ASSERT_EQ(static_cast<uintptr_t>(0), map.start); 772 ASSERT_EQ(static_cast<uintptr_t>(0), map.end); 773 ASSERT_EQ(0, map.flags); 774 ASSERT_EQ("", map.name); 775 } 776 777 TEST(libbacktrace, format_test) { 778 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD)); 779 ASSERT_TRUE(backtrace.get() != nullptr); 780 781 backtrace_frame_data_t frame; 782 frame.num = 1; 783 frame.pc = 2; 784 frame.sp = 0; 785 frame.stack_size = 0; 786 frame.func_offset = 0; 787 788 // Check no map set. 
789 frame.num = 1; 790 #if defined(__LP64__) 791 EXPECT_EQ("#01 pc 0000000000000002 <unknown>", 792 #else 793 EXPECT_EQ("#01 pc 00000002 <unknown>", 794 #endif 795 backtrace->FormatFrameData(&frame)); 796 797 // Check map name empty, but exists. 798 frame.pc = 0xb0020; 799 frame.map.start = 0xb0000; 800 frame.map.end = 0xbffff; 801 frame.map.load_base = 0; 802 #if defined(__LP64__) 803 EXPECT_EQ("#01 pc 0000000000000020 <anonymous:00000000000b0000>", 804 #else 805 EXPECT_EQ("#01 pc 00000020 <anonymous:000b0000>", 806 #endif 807 backtrace->FormatFrameData(&frame)); 808 809 // Check map name begins with a [. 810 frame.pc = 0xc0020; 811 frame.map.start = 0xc0000; 812 frame.map.end = 0xcffff; 813 frame.map.load_base = 0; 814 frame.map.name = "[anon:thread signal stack]"; 815 #if defined(__LP64__) 816 EXPECT_EQ("#01 pc 0000000000000020 [anon:thread signal stack:00000000000c0000]", 817 #else 818 EXPECT_EQ("#01 pc 00000020 [anon:thread signal stack:000c0000]", 819 #endif 820 backtrace->FormatFrameData(&frame)); 821 822 // Check relative pc is set and map name is set. 823 frame.pc = 0x12345679; 824 frame.map.name = "MapFake"; 825 frame.map.start = 1; 826 frame.map.end = 1; 827 #if defined(__LP64__) 828 EXPECT_EQ("#01 pc 0000000012345678 MapFake", 829 #else 830 EXPECT_EQ("#01 pc 12345678 MapFake", 831 #endif 832 backtrace->FormatFrameData(&frame)); 833 834 // Check func_name is set, but no func offset. 835 frame.func_name = "ProcFake"; 836 #if defined(__LP64__) 837 EXPECT_EQ("#01 pc 0000000012345678 MapFake (ProcFake)", 838 #else 839 EXPECT_EQ("#01 pc 12345678 MapFake (ProcFake)", 840 #endif 841 backtrace->FormatFrameData(&frame)); 842 843 // Check func_name is set, and func offset is non-zero. 
844 frame.func_offset = 645; 845 #if defined(__LP64__) 846 EXPECT_EQ("#01 pc 0000000012345678 MapFake (ProcFake+645)", 847 #else 848 EXPECT_EQ("#01 pc 12345678 MapFake (ProcFake+645)", 849 #endif 850 backtrace->FormatFrameData(&frame)); 851 852 // Check func_name is set, func offset is non-zero, and load_base is non-zero. 853 frame.func_offset = 645; 854 frame.map.load_base = 100; 855 #if defined(__LP64__) 856 EXPECT_EQ("#01 pc 00000000123456dc MapFake (ProcFake+645)", 857 #else 858 EXPECT_EQ("#01 pc 123456dc MapFake (ProcFake+645)", 859 #endif 860 backtrace->FormatFrameData(&frame)); 861 862 // Check a non-zero map offset. 863 frame.map.offset = 0x1000; 864 #if defined(__LP64__) 865 EXPECT_EQ("#01 pc 00000000123456dc MapFake (offset 0x1000) (ProcFake+645)", 866 #else 867 EXPECT_EQ("#01 pc 123456dc MapFake (offset 0x1000) (ProcFake+645)", 868 #endif 869 backtrace->FormatFrameData(&frame)); 870 } 871 872 struct map_test_t { 873 uintptr_t start; 874 uintptr_t end; 875 }; 876 877 bool map_sort(map_test_t i, map_test_t j) { 878 return i.start < j.start; 879 } 880 881 void VerifyMap(pid_t pid) { 882 char buffer[4096]; 883 snprintf(buffer, sizeof(buffer), "/proc/%d/maps", pid); 884 885 FILE* map_file = fopen(buffer, "r"); 886 ASSERT_TRUE(map_file != nullptr); 887 std::vector<map_test_t> test_maps; 888 while (fgets(buffer, sizeof(buffer), map_file)) { 889 map_test_t map; 890 ASSERT_EQ(2, sscanf(buffer, "%" SCNxPTR "-%" SCNxPTR " ", &map.start, &map.end)); 891 test_maps.push_back(map); 892 } 893 fclose(map_file); 894 std::sort(test_maps.begin(), test_maps.end(), map_sort); 895 896 std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(pid)); 897 898 // Basic test that verifies that the map is in the expected order. 
899 ScopedBacktraceMapIteratorLock lock(map.get()); 900 std::vector<map_test_t>::const_iterator test_it = test_maps.begin(); 901 for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) { 902 ASSERT_TRUE(test_it != test_maps.end()); 903 ASSERT_EQ(test_it->start, it->start); 904 ASSERT_EQ(test_it->end, it->end); 905 ++test_it; 906 } 907 ASSERT_TRUE(test_it == test_maps.end()); 908 } 909 910 TEST(libbacktrace, verify_map_remote) { 911 pid_t pid; 912 913 if ((pid = fork()) == 0) { 914 while (true) { 915 } 916 _exit(0); 917 } 918 ASSERT_LT(0, pid); 919 920 ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0); 921 922 // Wait for the process to get to a stopping point. 923 WaitForStop(pid); 924 925 // The maps should match exactly since the forked process has been paused. 926 VerifyMap(pid); 927 928 ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0); 929 930 kill(pid, SIGKILL); 931 ASSERT_EQ(waitpid(pid, nullptr, 0), pid); 932 } 933 934 void InitMemory(uint8_t* memory, size_t bytes) { 935 for (size_t i = 0; i < bytes; i++) { 936 memory[i] = i; 937 if (memory[i] == '\0') { 938 // Don't use '\0' in our data so we can verify that an overread doesn't 939 // occur by using a '\0' as the character after the read data. 940 memory[i] = 23; 941 } 942 } 943 } 944 945 void* ThreadReadTest(void* data) { 946 thread_t* thread_data = reinterpret_cast<thread_t*>(data); 947 948 thread_data->tid = gettid(); 949 950 // Create two map pages. 951 // Mark the second page as not-readable. 952 size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE)); 953 uint8_t* memory; 954 if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) { 955 return reinterpret_cast<void*>(-1); 956 } 957 958 if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) { 959 return reinterpret_cast<void*>(-1); 960 } 961 962 // Set up a simple pattern in memory. 
963 InitMemory(memory, pagesize); 964 965 thread_data->data = memory; 966 967 // Tell the caller it's okay to start reading memory. 968 android_atomic_acquire_store(1, &thread_data->state); 969 970 // Loop waiting for the caller to finish reading the memory. 971 while (thread_data->state) { 972 } 973 974 // Re-enable read-write on the page so that we don't crash if we try 975 // and access data on this page when freeing the memory. 976 if (mprotect(&memory[pagesize], pagesize, PROT_READ | PROT_WRITE) != 0) { 977 return reinterpret_cast<void*>(-1); 978 } 979 free(memory); 980 981 android_atomic_acquire_store(1, &thread_data->state); 982 983 return nullptr; 984 } 985 986 void RunReadTest(Backtrace* backtrace, uintptr_t read_addr) { 987 size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE)); 988 989 // Create a page of data to use to do quick compares. 990 uint8_t* expected = new uint8_t[pagesize]; 991 InitMemory(expected, pagesize); 992 993 uint8_t* data = new uint8_t[2*pagesize]; 994 // Verify that we can only read one page worth of data. 995 size_t bytes_read = backtrace->Read(read_addr, data, 2 * pagesize); 996 ASSERT_EQ(pagesize, bytes_read); 997 ASSERT_TRUE(memcmp(data, expected, pagesize) == 0); 998 999 // Verify unaligned reads. 1000 for (size_t i = 1; i < sizeof(word_t); i++) { 1001 bytes_read = backtrace->Read(read_addr + i, data, 2 * sizeof(word_t)); 1002 ASSERT_EQ(2 * sizeof(word_t), bytes_read); 1003 ASSERT_TRUE(memcmp(data, &expected[i], 2 * sizeof(word_t)) == 0) 1004 << "Offset at " << i << " failed"; 1005 } 1006 1007 // Verify small unaligned reads. 1008 for (size_t i = 1; i < sizeof(word_t); i++) { 1009 for (size_t j = 1; j < sizeof(word_t); j++) { 1010 // Set one byte past what we expect to read, to guarantee we don't overread. 
      data[j] = '\0';
      bytes_read = backtrace->Read(read_addr + i, data, j);
      ASSERT_EQ(j, bytes_read);
      ASSERT_TRUE(memcmp(data, &expected[i], j) == 0)
          << "Offset at " << i << " length " << j << " miscompared";
      // The '\0' sentinel one byte past the requested length must survive.
      ASSERT_EQ('\0', data[j])
          << "Offset at " << i << " length " << j << " wrote too much data";
    }
  }
  delete[] data;
  delete[] expected;
}

// Read the memory of a sibling thread (same process, different tid) through
// the Backtrace interface and verify it with RunReadTest().
TEST(libbacktrace, thread_read) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  pthread_t thread;
  thread_t thread_data = { 0, 0, 0, nullptr };
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadReadTest, &thread_data) == 0);

  // Wait (up to 10 seconds) for the thread to publish its buffer.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);

  RunReadTest(backtrace.get(), reinterpret_cast<uintptr_t>(thread_data.data));

  // Signal the thread to clean up, then wait for it to acknowledge.
  android_atomic_acquire_store(0, &thread_data.state);

  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));
}

// Communication globals between the forked child and the tracing parent:
// the parent reads them out of the child's memory with Backtrace::Read().
volatile uintptr_t g_ready = 0;
volatile uintptr_t g_addr = 0;

// Child side of the process_read test: sets up the same readable/unreadable
// two-page pattern as ThreadReadTest, publishes its address, then spins.
void ForkedReadTest() {
  // Create two map pages.
  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  uint8_t* memory;
  if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
    perror("Failed to allocate memory\n");
    exit(1);
  }

  // Mark the second page as not-readable.
  if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
    perror("Failed to mprotect memory\n");
    exit(1);
  }

  // Set up a simple pattern in memory.
  InitMemory(memory, pagesize);

  // Publish the buffer address, then flag readiness for the parent.
  g_addr = reinterpret_cast<uintptr_t>(memory);
  g_ready = 1;

  while (1) {
    usleep(US_PER_MSEC);
  }
}

// Attach to a forked child with ptrace and read its memory remotely.
TEST(libbacktrace, process_read) {
  g_ready = 0;
  pid_t pid;
  if ((pid = fork()) == 0) {
    ForkedReadTest();
    exit(0);
  }
  ASSERT_NE(-1, pid);

  bool test_executed = false;
  uint64_t start = NanoTime();
  // Poll (attach, inspect, detach) until the child reports ready via g_ready,
  // giving up after 5 seconds.
  while (1) {
    if (ptrace(PTRACE_ATTACH, pid, 0, 0) == 0) {
      WaitForStop(pid);

      std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));
      ASSERT_TRUE(backtrace.get() != nullptr);

      uintptr_t read_addr;
      size_t bytes_read = backtrace->Read(reinterpret_cast<uintptr_t>(&g_ready),
                                          reinterpret_cast<uint8_t*>(&read_addr),
                                          sizeof(uintptr_t));
      ASSERT_EQ(sizeof(uintptr_t), bytes_read);
      if (read_addr) {
        // The forked process is ready to be read.
        bytes_read = backtrace->Read(reinterpret_cast<uintptr_t>(&g_addr),
                                     reinterpret_cast<uint8_t*>(&read_addr),
                                     sizeof(uintptr_t));
        ASSERT_EQ(sizeof(uintptr_t), bytes_read);

        RunReadTest(backtrace.get(), read_addr);

        test_executed = true;
        break;
      }
      ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
    }
    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }
  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  ASSERT_TRUE(test_executed);
}

void VerifyFunctionsFound(const std::vector<std::string>& found_functions) {
  // We expect to find these functions in libbacktrace_test. If we don't
  // find them, that's a bug in the memory read handling code in libunwind.
1124 std::list<std::string> expected_functions; 1125 expected_functions.push_back("test_recursive_call"); 1126 expected_functions.push_back("test_level_one"); 1127 expected_functions.push_back("test_level_two"); 1128 expected_functions.push_back("test_level_three"); 1129 expected_functions.push_back("test_level_four"); 1130 for (const auto& found_function : found_functions) { 1131 for (const auto& expected_function : expected_functions) { 1132 if (found_function == expected_function) { 1133 expected_functions.remove(found_function); 1134 break; 1135 } 1136 } 1137 } 1138 ASSERT_TRUE(expected_functions.empty()) << "Not all functions found in shared library."; 1139 } 1140 1141 const char* CopySharedLibrary() { 1142 #if defined(__LP64__) 1143 const char* lib_name = "lib64"; 1144 #else 1145 const char* lib_name = "lib"; 1146 #endif 1147 1148 #if defined(__BIONIC__) 1149 const char* tmp_so_name = "/data/local/tmp/libbacktrace_test.so"; 1150 std::string cp_cmd = android::base::StringPrintf("cp /system/%s/libbacktrace_test.so %s", 1151 lib_name, tmp_so_name); 1152 #else 1153 const char* tmp_so_name = "/tmp/libbacktrace_test.so"; 1154 if (getenv("ANDROID_HOST_OUT") == NULL) { 1155 fprintf(stderr, "ANDROID_HOST_OUT not set, make sure you run lunch."); 1156 return nullptr; 1157 } 1158 std::string cp_cmd = android::base::StringPrintf("cp %s/%s/libbacktrace_test.so %s", 1159 getenv("ANDROID_HOST_OUT"), lib_name, 1160 tmp_so_name); 1161 #endif 1162 1163 // Copy the shared so to a tempory directory. 
1164 system(cp_cmd.c_str()); 1165 1166 return tmp_so_name; 1167 } 1168 1169 TEST(libbacktrace, check_unreadable_elf_local) { 1170 const char* tmp_so_name = CopySharedLibrary(); 1171 ASSERT_TRUE(tmp_so_name != nullptr); 1172 1173 struct stat buf; 1174 ASSERT_TRUE(stat(tmp_so_name, &buf) != -1); 1175 uintptr_t map_size = buf.st_size; 1176 1177 int fd = open(tmp_so_name, O_RDONLY); 1178 ASSERT_TRUE(fd != -1); 1179 1180 void* map = mmap(NULL, map_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0); 1181 ASSERT_TRUE(map != MAP_FAILED); 1182 close(fd); 1183 ASSERT_TRUE(unlink(tmp_so_name) != -1); 1184 1185 std::vector<std::string> found_functions; 1186 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, 1187 BACKTRACE_CURRENT_THREAD)); 1188 ASSERT_TRUE(backtrace.get() != nullptr); 1189 1190 // Needed before GetFunctionName will work. 1191 backtrace->Unwind(0); 1192 1193 // Loop through the entire map, and get every function we can find. 1194 map_size += reinterpret_cast<uintptr_t>(map); 1195 std::string last_func; 1196 for (uintptr_t read_addr = reinterpret_cast<uintptr_t>(map); 1197 read_addr < map_size; read_addr += 4) { 1198 uintptr_t offset; 1199 std::string func_name = backtrace->GetFunctionName(read_addr, &offset); 1200 if (!func_name.empty() && last_func != func_name) { 1201 found_functions.push_back(func_name); 1202 } 1203 last_func = func_name; 1204 } 1205 1206 ASSERT_TRUE(munmap(map, map_size - reinterpret_cast<uintptr_t>(map)) == 0); 1207 1208 VerifyFunctionsFound(found_functions); 1209 } 1210 1211 TEST(libbacktrace, check_unreadable_elf_remote) { 1212 const char* tmp_so_name = CopySharedLibrary(); 1213 ASSERT_TRUE(tmp_so_name != nullptr); 1214 1215 g_ready = 0; 1216 1217 struct stat buf; 1218 ASSERT_TRUE(stat(tmp_so_name, &buf) != -1); 1219 uintptr_t map_size = buf.st_size; 1220 1221 pid_t pid; 1222 if ((pid = fork()) == 0) { 1223 int fd = open(tmp_so_name, O_RDONLY); 1224 if (fd == -1) { 1225 fprintf(stderr, "Failed to open file 
%s: %s\n", tmp_so_name, strerror(errno)); 1226 unlink(tmp_so_name); 1227 exit(0); 1228 } 1229 1230 void* map = mmap(NULL, map_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0); 1231 if (map == MAP_FAILED) { 1232 fprintf(stderr, "Failed to map in memory: %s\n", strerror(errno)); 1233 unlink(tmp_so_name); 1234 exit(0); 1235 } 1236 close(fd); 1237 if (unlink(tmp_so_name) == -1) { 1238 fprintf(stderr, "Failed to unlink: %s\n", strerror(errno)); 1239 exit(0); 1240 } 1241 1242 g_addr = reinterpret_cast<uintptr_t>(map); 1243 g_ready = 1; 1244 while (true) { 1245 usleep(US_PER_MSEC); 1246 } 1247 exit(0); 1248 } 1249 ASSERT_TRUE(pid > 0); 1250 1251 std::vector<std::string> found_functions; 1252 uint64_t start = NanoTime(); 1253 while (true) { 1254 ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0); 1255 1256 // Wait for the process to get to a stopping point. 1257 WaitForStop(pid); 1258 1259 std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, BACKTRACE_CURRENT_THREAD)); 1260 ASSERT_TRUE(backtrace.get() != nullptr); 1261 1262 uintptr_t read_addr; 1263 ASSERT_EQ(sizeof(uintptr_t), backtrace->Read(reinterpret_cast<uintptr_t>(&g_ready), reinterpret_cast<uint8_t*>(&read_addr), sizeof(uintptr_t))); 1264 if (read_addr) { 1265 ASSERT_EQ(sizeof(uintptr_t), backtrace->Read(reinterpret_cast<uintptr_t>(&g_addr), reinterpret_cast<uint8_t*>(&read_addr), sizeof(uintptr_t))); 1266 1267 // Needed before GetFunctionName will work. 1268 backtrace->Unwind(0); 1269 1270 // Loop through the entire map, and get every function we can find. 
1271 map_size += read_addr; 1272 std::string last_func; 1273 for (; read_addr < map_size; read_addr += 4) { 1274 uintptr_t offset; 1275 std::string func_name = backtrace->GetFunctionName(read_addr, &offset); 1276 if (!func_name.empty() && last_func != func_name) { 1277 found_functions.push_back(func_name); 1278 } 1279 last_func = func_name; 1280 } 1281 break; 1282 } 1283 ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0); 1284 1285 if ((NanoTime() - start) > 5 * NS_PER_SEC) { 1286 break; 1287 } 1288 usleep(US_PER_MSEC); 1289 } 1290 1291 kill(pid, SIGKILL); 1292 ASSERT_EQ(waitpid(pid, nullptr, 0), pid); 1293 1294 VerifyFunctionsFound(found_functions); 1295 } 1296 1297 bool FindFuncFrameInBacktrace(Backtrace* backtrace, uintptr_t test_func, size_t* frame_num) { 1298 backtrace_map_t map; 1299 backtrace->FillInMap(test_func, &map); 1300 if (!BacktraceMap::IsValid(map)) { 1301 return false; 1302 } 1303 1304 // Loop through the frames, and find the one that is in the map. 1305 *frame_num = 0; 1306 for (Backtrace::const_iterator it = backtrace->begin(); it != backtrace->end(); ++it) { 1307 if (BacktraceMap::IsValid(it->map) && map.start == it->map.start && 1308 it->pc >= test_func) { 1309 *frame_num = it->num; 1310 return true; 1311 } 1312 } 1313 return false; 1314 } 1315 1316 void VerifyUnreadableElfFrame(Backtrace* backtrace, uintptr_t test_func, size_t frame_num) { 1317 ASSERT_LT(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES)) 1318 << DumpFrames(backtrace); 1319 1320 ASSERT_TRUE(frame_num != 0) << DumpFrames(backtrace); 1321 // Make sure that there is at least one more frame above the test func call. 
  ASSERT_LT(frame_num, backtrace->NumFrames()) << DumpFrames(backtrace);

  // The frame's pc should be within a couple hundred bytes of the function.
  uintptr_t diff = backtrace->GetFrame(frame_num)->pc - test_func;
  ASSERT_LT(diff, 200U) << DumpFrames(backtrace);
}

// Unwind the current thread and verify that the frame for |test_func|
// (which lives in the unlinked library) is present and plausible.
void VerifyUnreadableElfBacktrace(uintptr_t test_func) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS,
                                                         BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());

  size_t frame_num;
  ASSERT_TRUE(FindFuncFrameInBacktrace(backtrace.get(), test_func, &frame_num));

  VerifyUnreadableElfFrame(backtrace.get(), test_func, frame_num);
}

// Signature of test_level_one as resolved from the test library via dlsym.
typedef int (*test_func_t)(int, int, int, int, void (*)(uintptr_t), uintptr_t);

TEST(libbacktrace, unwind_through_unreadable_elf_local) {
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);
  void* lib_handle = dlopen(tmp_so_name, RTLD_NOW);
  ASSERT_TRUE(lib_handle != nullptr);
  // Unlink while still mapped so the unwinder cannot read the ELF from disk.
  ASSERT_TRUE(unlink(tmp_so_name) != -1);

  test_func_t test_func;
  test_func = reinterpret_cast<test_func_t>(dlsym(lib_handle, "test_level_one"));
  ASSERT_TRUE(test_func != nullptr);

  // The callback runs at the deepest call level and does the verification.
  ASSERT_NE(test_func(1, 2, 3, 4, VerifyUnreadableElfBacktrace,
                      reinterpret_cast<uintptr_t>(test_func)), 0);

  ASSERT_TRUE(dlclose(lib_handle) == 0);
}

TEST(libbacktrace, unwind_through_unreadable_elf_remote) {
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);
  void* lib_handle = dlopen(tmp_so_name, RTLD_NOW);
  ASSERT_TRUE(lib_handle != nullptr);
  ASSERT_TRUE(unlink(tmp_so_name) != -1);

  test_func_t test_func;
  test_func = reinterpret_cast<test_func_t>(dlsym(lib_handle, "test_level_one"));
  ASSERT_TRUE(test_func != nullptr);

  pid_t pid;
  if ((pid = fork()) == 0) {
    // Child: call into the library with no callback and stay inside it.
    test_func(1, 2, 3, 4, 0, 0);
    exit(0);
  }
  ASSERT_TRUE(pid > 0);
  ASSERT_TRUE(dlclose(lib_handle) == 0);

  uint64_t start = NanoTime();
  bool done = false;
  // Repeatedly attach and unwind until the child is caught inside test_func,
  // giving up after 5 seconds.
  while (!done) {
    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    // Wait for the process to get to a stopping point.
    WaitForStop(pid);

    std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, BACKTRACE_CURRENT_THREAD));
    ASSERT_TRUE(backtrace.get() != nullptr);
    ASSERT_TRUE(backtrace->Unwind(0));
    ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());

    size_t frame_num;
    if (FindFuncFrameInBacktrace(backtrace.get(),
                                 reinterpret_cast<uintptr_t>(test_func), &frame_num)) {

      VerifyUnreadableElfFrame(backtrace.get(), reinterpret_cast<uintptr_t>(test_func), frame_num);
      done = true;
    }

    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  ASSERT_TRUE(done) << "Test function never found in unwind.";
}

// Creating a Backtrace for a nonexistent tid must fail with the dedicated
// THREAD_DOESNT_EXIST error, not crash or hang.
TEST(libbacktrace, unwind_thread_doesnt_exist) {
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, 99999999));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_FALSE(backtrace->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_ERROR_THREAD_DOESNT_EXIST, backtrace->GetError());
}

#if defined(ENABLE_PSS_TESTS)
#include "GetPss.h"

// Maximum PSS growth (in bytes) tolerated before declaring a leak.
#define MAX_LEAK_BYTES 32*1024UL

// Repeatedly create/unwind/destroy Backtrace objects for (pid, tid) and
// compare process PSS before and after to detect memory leaks.
void CheckForLeak(pid_t pid, pid_t tid) {
  // Do a few runs to get the PSS stable.
  for (size_t i = 0; i < 100; i++) {
    Backtrace* backtrace = Backtrace::Create(pid, tid);
    ASSERT_TRUE(backtrace != nullptr);
    ASSERT_TRUE(backtrace->Unwind(0));
    ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
    delete backtrace;
  }
  size_t stable_pss = GetPssBytes();
  ASSERT_TRUE(stable_pss != 0);

  // Loop enough that even a small leak should be detectable.
  for (size_t i = 0; i < 4096; i++) {
    Backtrace* backtrace = Backtrace::Create(pid, tid);
    ASSERT_TRUE(backtrace != nullptr);
    ASSERT_TRUE(backtrace->Unwind(0));
    ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
    delete backtrace;
  }
  size_t new_pss = GetPssBytes();
  ASSERT_TRUE(new_pss != 0);
  size_t abs_diff = (new_pss > stable_pss) ? new_pss - stable_pss : stable_pss - new_pss;
  // As long as the new pss is within a certain amount, consider everything okay.
  ASSERT_LE(abs_diff, MAX_LEAK_BYTES);
}

// Leak check while unwinding the current thread locally.
TEST(libbacktrace, check_for_leak_local) {
  CheckForLeak(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD);
}

// Leak check while unwinding a sibling thread of the current process.
TEST(libbacktrace, check_for_leak_local_thread) {
  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, nullptr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  CheckForLeak(BACKTRACE_CURRENT_PROCESS, thread_data.tid);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  ASSERT_TRUE(pthread_join(thread, nullptr) == 0);
}

// Leak check while unwinding a ptrace-stopped forked process.
TEST(libbacktrace, check_for_leak_remote) {
  pid_t pid;

  if ((pid = fork()) == 0) {
    while (true) {
    }
    _exit(0);
  }
  ASSERT_LT(0, pid);

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

  // Wait for the process to get to a stopping point.
  WaitForStop(pid);

  CheckForLeak(pid, BACKTRACE_CURRENT_THREAD);

  ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);
}
#endif