/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "trace.h"

#include <sys/uio.h>
#include <unistd.h>

#include "android-base/stringprintf.h"

#include "art_method-inl.h"
#include "base/casts.h"
#include "base/enums.h"
#include "base/os.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/unix_file/fd_file.h"
#include "base/utils.h"
#include "class_linker.h"
#include "common_throws.h"
#include "debugger.h"
#include "dex/descriptors_names.h"
#include "dex/dex_file-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/scoped_gc_critical_section.h"
#include "instrumentation.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread.h"
#include "thread_list.h"

namespace art {

using android::base::StringPrintf;

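// A trace method id ("tmid") packs a TraceAction into its low TraceActionBits bits and the
// method's index into unique_methods_ into the remaining high bits; see
// EncodeTraceMethodAndAction() and DecodeTraceMethod() below.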
static constexpr size_t TraceActionBits = MinimumBitsToStore(
    static_cast<size_t>(kTraceMethodActionMask));
static constexpr uint8_t kOpNewMethod = 1U;
static constexpr uint8_t kOpNewThread = 2U;
static constexpr uint8_t kOpTraceSummary = 3U;

static const char     kTraceTokenChar             = '*';
static const uint16_t kTraceHeaderLength          = 32;
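// The magic spells "SLOW" when the u4 below is written little-endian, presumably a legacy of
// the Dalvik-era dmtrace format.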
static const uint32_t kTraceMagicValue            = 0x574f4c53;
static const uint16_t kTraceVersionSingleClock    = 2;
static const uint16_t kTraceVersionDualClock      = 3;
static const uint16_t kTraceRecordSizeSingleClock = 10;  // using v2
static const uint16_t kTraceRecordSizeDualClock   = 14;  // using v3 with two timestamps

TraceClockSource Trace::default_clock_source_ = kDefaultTraceClockSource;

Trace* volatile Trace::the_trace_ = nullptr;
pthread_t Trace::sampling_pthread_ = 0U;
std::unique_ptr<std::vector<ArtMethod*>> Trace::temp_stack_trace_;

// The key identifying the tracer to update instrumentation.
static constexpr const char* kTracerInstrumentationKey = "Tracer";

static TraceAction DecodeTraceAction(uint32_t tmid) {
  return static_cast<TraceAction>(tmid & kTraceMethodActionMask);
}

ArtMethod* Trace::DecodeTraceMethod(uint32_t tmid) {
  MutexLock mu(Thread::Current(), *unique_methods_lock_);
  return unique_methods_[tmid >> TraceActionBits];
}

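// Assigns each method a small dense id on first sight; the reverse mapping lives in
// unique_methods_, so DecodeTraceMethod() above is a simple vector lookup.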
uint32_t Trace::EncodeTraceMethod(ArtMethod* method) {
  MutexLock mu(Thread::Current(), *unique_methods_lock_);
  uint32_t idx;
  auto it = art_method_id_map_.find(method);
  if (it != art_method_id_map_.end()) {
    idx = it->second;
  } else {
    unique_methods_.push_back(method);
    idx = unique_methods_.size() - 1;
    art_method_id_map_.emplace(method, idx);
  }
  DCHECK_LT(idx, unique_methods_.size());
  DCHECK_EQ(unique_methods_[idx], method);
  return idx;
}

uint32_t Trace::EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action) {
  uint32_t tmid = (EncodeTraceMethod(method) << TraceActionBits) | action;
  DCHECK_EQ(method, DecodeTraceMethod(tmid));
  return tmid;
}

std::vector<ArtMethod*>* Trace::AllocStackTrace() {
  return (temp_stack_trace_.get() != nullptr) ? temp_stack_trace_.release() :
      new std::vector<ArtMethod*>();
}

void Trace::FreeStackTrace(std::vector<ArtMethod*>* stack_trace) {
  stack_trace->clear();
  temp_stack_trace_.reset(stack_trace);
}

void Trace::SetDefaultClockSource(TraceClockSource clock_source) {
#if defined(__linux__)
  default_clock_source_ = clock_source;
#else
  if (clock_source != TraceClockSource::kWall) {
    LOG(WARNING) << "Ignoring tracing request to use CPU time.";
  }
#endif
}

static uint16_t GetTraceVersion(TraceClockSource clock_source) {
  return (clock_source == TraceClockSource::kDual) ? kTraceVersionDualClock
                                                   : kTraceVersionSingleClock;
}

static uint16_t GetRecordSize(TraceClockSource clock_source) {
  return (clock_source == TraceClockSource::kDual) ? kTraceRecordSizeDualClock
                                                   : kTraceRecordSizeSingleClock;
}

bool Trace::UseThreadCpuClock() {
  return (clock_source_ == TraceClockSource::kThreadCpu) ||
      (clock_source_ == TraceClockSource::kDual);
}

bool Trace::UseWallClock() {
  return (clock_source_ == TraceClockSource::kWall) ||
      (clock_source_ == TraceClockSource::kDual);
}

void Trace::MeasureClockOverhead() {
  if (UseThreadCpuClock()) {
    Thread::Current()->GetCpuMicroTime();
  }
  if (UseWallClock()) {
    MicroTime();
  }
}

// Compute the average time taken to measure the enabled clocks.
uint32_t Trace::GetClockOverheadNanoSeconds() {
  Thread* self = Thread::Current();
  uint64_t start = self->GetCpuMicroTime();

  for (int i = 4000; i > 0; i--) {
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
  }

  uint64_t elapsed_us = self->GetCpuMicroTime() - start;
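  // 4000 loop iterations x 8 calls each = 32000 samples, so elapsed_us / 32 is the average cost
  // per call in nanoseconds (converting us to ns multiplies by 1000, and 32000 / 1000 = 32).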
  return static_cast<uint32_t>(elapsed_us / 32);
}

// TODO: put this somewhere with the big-endian equivalent used by JDWP.
static void Append2LE(uint8_t* buf, uint16_t val) {
  *buf++ = static_cast<uint8_t>(val);
  *buf++ = static_cast<uint8_t>(val >> 8);
}

// TODO: put this somewhere with the big-endian equivalent used by JDWP.
static void Append4LE(uint8_t* buf, uint32_t val) {
  *buf++ = static_cast<uint8_t>(val);
  *buf++ = static_cast<uint8_t>(val >> 8);
  *buf++ = static_cast<uint8_t>(val >> 16);
  *buf++ = static_cast<uint8_t>(val >> 24);
}

// TODO: put this somewhere with the big-endian equivalent used by JDWP.
static void Append8LE(uint8_t* buf, uint64_t val) {
  *buf++ = static_cast<uint8_t>(val);
  *buf++ = static_cast<uint8_t>(val >> 8);
  *buf++ = static_cast<uint8_t>(val >> 16);
  *buf++ = static_cast<uint8_t>(val >> 24);
  *buf++ = static_cast<uint8_t>(val >> 32);
  *buf++ = static_cast<uint8_t>(val >> 40);
  *buf++ = static_cast<uint8_t>(val >> 48);
  *buf++ = static_cast<uint8_t>(val >> 56);
}

static void GetSample(Thread* thread, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
  std::vector<ArtMethod*>* const stack_trace = Trace::AllocStackTrace();
  StackVisitor::WalkStack(
      [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
        ArtMethod* m = stack_visitor->GetMethod();
        // Ignore runtime frames (in particular callee save).
        if (!m->IsRuntimeMethod()) {
          stack_trace->push_back(m);
        }
        return true;
      },
      thread,
      /* context= */ nullptr,
      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
  Trace* the_trace = reinterpret_cast<Trace*>(arg);
  the_trace->CompareAndUpdateStackTrace(thread, stack_trace);
}

static void ClearThreadStackTraceAndClockBase(Thread* thread, void* arg ATTRIBUTE_UNUSED) {
  thread->SetTraceClockBase(0);
  std::vector<ArtMethod*>* stack_trace = thread->GetStackTraceSample();
  thread->SetStackTraceSample(nullptr);
  delete stack_trace;
}

void Trace::CompareAndUpdateStackTrace(Thread* thread,
                                       std::vector<ArtMethod*>* stack_trace) {
  CHECK_EQ(pthread_self(), sampling_pthread_);
  std::vector<ArtMethod*>* old_stack_trace = thread->GetStackTraceSample();
  // Update the thread's stack trace sample.
  thread->SetStackTraceSample(stack_trace);
  // Read timer clocks to use for all events in this trace.
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  if (old_stack_trace == nullptr) {
    // If there's no previous stack trace sample for this thread, log an entry event for all
    // methods in the trace.
    for (auto rit = stack_trace->rbegin(); rit != stack_trace->rend(); ++rit) {
      LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered,
                          thread_clock_diff, wall_clock_diff);
    }
  } else {
    // If there's a previous stack trace for this thread, diff the traces and emit entry and exit
    // events accordingly.
    auto old_rit = old_stack_trace->rbegin();
    auto rit = stack_trace->rbegin();
    // Iterate bottom-up over both traces until there's a difference between them.
    while (old_rit != old_stack_trace->rend() && rit != stack_trace->rend() && *old_rit == *rit) {
      old_rit++;
      rit++;
    }
    // Iterate top-down over the old trace until the point where they differ, emitting exit events.
    for (auto old_it = old_stack_trace->begin(); old_it != old_rit.base(); ++old_it) {
      LogMethodTraceEvent(thread, *old_it, instrumentation::Instrumentation::kMethodExited,
                          thread_clock_diff, wall_clock_diff);
    }
    // Iterate bottom-up over the new trace from the point where they differ, emitting entry events.
    for (; rit != stack_trace->rend(); ++rit) {
      LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered,
                          thread_clock_diff, wall_clock_diff);
    }
    FreeStackTrace(old_stack_trace);
  }
}

void* Trace::RunSamplingThread(void* arg) {
  Runtime* runtime = Runtime::Current();
  intptr_t interval_us = reinterpret_cast<intptr_t>(arg);
  CHECK_GE(interval_us, 0);
  CHECK(runtime->AttachCurrentThread("Sampling Profiler", true, runtime->GetSystemThreadGroup(),
                                     !runtime->IsAotCompiler()));

  while (true) {
    usleep(interval_us);
    ScopedTrace trace("Profile sampling");
    Thread* self = Thread::Current();
    Trace* the_trace;
    {
      MutexLock mu(self, *Locks::trace_lock_);
      the_trace = the_trace_;
      if (the_trace == nullptr) {
        break;
      }
    }
    {
      // Avoid a deadlock between a thread doing garbage collection
      // and the profile sampling thread, by blocking GC when sampling
      // thread stacks (see b/73624630).
      gc::ScopedGCCriticalSection gcs(self,
                                      art::gc::kGcCauseInstrumentation,
                                      art::gc::kCollectorTypeInstrumentation);
      ScopedSuspendAll ssa(__FUNCTION__);
      MutexLock mu(self, *Locks::thread_list_lock_);
      runtime->GetThreadList()->ForEach(GetSample, the_trace);
    }
  }

  runtime->DetachCurrentThread();
  return nullptr;
}

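// Example of starting a file-backed method trace (hypothetical caller; TraceOutputMode::kFile
// and the MB constant are assumed to be declared in trace.h and base/globals.h respectively,
// as neither is visible in this file):
//   Trace::Start("/data/local/tmp/method.trace",
//                8 * MB /* buffer_size */,
//                0 /* flags */,
//                TraceOutputMode::kFile,
//                TraceMode::kMethodTracing,
//                0 /* interval_us; only used for TraceMode::kSampling */);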
void Trace::Start(const char* trace_filename,
                  size_t buffer_size,
                  int flags,
                  TraceOutputMode output_mode,
                  TraceMode trace_mode,
                  int interval_us) {
  std::unique_ptr<File> file(OS::CreateEmptyFileWriteOnly(trace_filename));
  if (file == nullptr) {
    std::string msg = android::base::StringPrintf("Unable to open trace file '%s'", trace_filename);
    PLOG(ERROR) << msg;
    ScopedObjectAccess soa(Thread::Current());
    Thread::Current()->ThrowNewException("Ljava/lang/RuntimeException;", msg.c_str());
    return;
  }
  Start(std::move(file), buffer_size, flags, output_mode, trace_mode, interval_us);
}

void Trace::Start(int trace_fd,
                  size_t buffer_size,
                  int flags,
                  TraceOutputMode output_mode,
                  TraceMode trace_mode,
                  int interval_us) {
  if (trace_fd < 0) {
    std::string msg = android::base::StringPrintf("Unable to start tracing with invalid fd %d",
                                                  trace_fd);
    LOG(ERROR) << msg;
    ScopedObjectAccess soa(Thread::Current());
    Thread::Current()->ThrowNewException("Ljava/lang/RuntimeException;", msg.c_str());
    return;
  }
  std::unique_ptr<File> file(new File(trace_fd, "tracefile"));
  Start(std::move(file), buffer_size, flags, output_mode, trace_mode, interval_us);
}

void Trace::StartDDMS(size_t buffer_size,
                      int flags,
                      TraceMode trace_mode,
                      int interval_us) {
  Start(std::unique_ptr<File>(),
        buffer_size,
        flags,
        TraceOutputMode::kDDMS,
        trace_mode,
        interval_us);
}

void Trace::Start(std::unique_ptr<File>&& trace_file_in,
                  size_t buffer_size,
                  int flags,
                  TraceOutputMode output_mode,
                  TraceMode trace_mode,
                  int interval_us) {
  // We own trace_file now and are responsible for closing it. To account for error situations, use
  // a specialized unique_ptr to ensure we close it on the way out (if it hasn't been passed to a
  // Trace instance).
  auto deleter = [](File* file) {
    if (file != nullptr) {
      file->MarkUnchecked();  // Don't deal with flushing requirements.
      int result ATTRIBUTE_UNUSED = file->Close();
      delete file;
    }
  };
  std::unique_ptr<File, decltype(deleter)> trace_file(trace_file_in.release(), deleter);

  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::trace_lock_);
    if (the_trace_ != nullptr) {
      LOG(ERROR) << "Trace already in progress, ignoring this request";
      return;
    }
  }

  // Check the interval if sampling is enabled.
  if (trace_mode == TraceMode::kSampling && interval_us <= 0) {
    LOG(ERROR) << "Invalid sampling interval: " << interval_us;
    ScopedObjectAccess soa(self);
    ThrowRuntimeException("Invalid sampling interval: %d", interval_us);
    return;
  }

  Runtime* runtime = Runtime::Current();

  // Enable allocation counting if specified in the flags.
  bool enable_stats = false;

  if (runtime->GetJit() != nullptr) {
    // TODO b/110263880 It would be better if we didn't need to do this.
    // Since we need to hold the method entrypoint across a suspend to ensure instrumentation
    // hooks are called correctly, we have to disable jit-gc to ensure that the entrypoint
    // doesn't go away. Furthermore, we need to leave this off permanently, since one could get
    // the same effect by toggling it on and off.
    runtime->GetJit()->GetCodeCache()->SetGarbageCollectCode(false);
  }

  // Create Trace object.
  {
    // Required since EnableMethodTracing calls ConfigureStubs which visits class linker classes.
    gc::ScopedGCCriticalSection gcs(self,
                                    gc::kGcCauseInstrumentation,
                                    gc::kCollectorTypeInstrumentation);
    ScopedSuspendAll ssa(__FUNCTION__);
    MutexLock mu(self, *Locks::trace_lock_);
    if (the_trace_ != nullptr) {
      LOG(ERROR) << "Trace already in progress, ignoring this request";
    } else {
      enable_stats = (flags & kTraceCountAllocs) != 0;
      the_trace_ = new Trace(trace_file.release(), buffer_size, flags, output_mode, trace_mode);
      if (trace_mode == TraceMode::kSampling) {
        CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, nullptr, &RunSamplingThread,
                                            reinterpret_cast<void*>(interval_us)),
                                            "Sampling profiler thread");
        the_trace_->interval_us_ = interval_us;
      } else {
        runtime->GetInstrumentation()->AddListener(the_trace_,
                                                   instrumentation::Instrumentation::kMethodEntered |
                                                   instrumentation::Instrumentation::kMethodExited |
                                                   instrumentation::Instrumentation::kMethodUnwind);
        // TODO: In full-PIC mode, we don't need to fully deopt.
        // TODO: We can only use trampoline entrypoints if we are java-debuggable since in that case
        // we know that inlining and other problematic optimizations are disabled. We might just
        // want to use the trampolines anyway since they are faster. It makes the story with
        // disabling jit-gc more complex though.
        runtime->GetInstrumentation()->EnableMethodTracing(
            kTracerInstrumentationKey, /*needs_interpreter=*/!runtime->IsJavaDebuggable());
      }
    }
  }

  // Can't call this when holding the mutator lock.
  if (enable_stats) {
    runtime->SetStatsEnabled(true);
  }
}

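// Stops the current trace, if any. |finish_tracing| controls whether the summary (and, in
// streaming mode, the end-of-records token) is written out; |flush_file| controls whether the
// trace file is flushed before closing. Abort() passes false for both, Stop() true for both.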
void Trace::StopTracing(bool finish_tracing, bool flush_file) {
  bool stop_alloc_counting = false;
  Runtime* const runtime = Runtime::Current();
  Trace* the_trace = nullptr;
  Thread* const self = Thread::Current();
  pthread_t sampling_pthread = 0U;
  {
    MutexLock mu(self, *Locks::trace_lock_);
    if (the_trace_ == nullptr) {
      LOG(ERROR) << "Trace stop requested, but no trace currently running";
    } else {
      the_trace = the_trace_;
      the_trace_ = nullptr;
      sampling_pthread = sampling_pthread_;
    }
  }
  // Make sure that we join before we delete the trace since we don't want to have
  // the sampling thread access a stale pointer. This finishes since the sampling thread exits when
  // the_trace_ is null.
  if (sampling_pthread != 0U) {
    CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, nullptr), "sampling thread shutdown");
    sampling_pthread_ = 0U;
  }

  if (the_trace != nullptr) {
    stop_alloc_counting = (the_trace->flags_ & Trace::kTraceCountAllocs) != 0;
    // Stop the trace sources from adding more entries to the trace buffer and synchronise stores.
    {
      gc::ScopedGCCriticalSection gcs(self,
                                      gc::kGcCauseInstrumentation,
                                      gc::kCollectorTypeInstrumentation);
      ScopedSuspendAll ssa(__FUNCTION__);

      if (the_trace->trace_mode_ == TraceMode::kSampling) {
        MutexLock mu(self, *Locks::thread_list_lock_);
        runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
      } else {
        runtime->GetInstrumentation()->RemoveListener(
            the_trace, instrumentation::Instrumentation::kMethodEntered |
            instrumentation::Instrumentation::kMethodExited |
            instrumentation::Instrumentation::kMethodUnwind);
        runtime->GetInstrumentation()->DisableMethodTracing(kTracerInstrumentationKey);
      }
    }
    // At this point, code may read buf_ as its writers are shut down
    // and the ScopedSuspendAll above has ensured all stores to buf_
    // are now visible.
    if (finish_tracing) {
      the_trace->FinishTracing();
    }
    if (the_trace->trace_file_.get() != nullptr) {
      // Do not try to erase, so flush and close explicitly.
      if (flush_file) {
        if (the_trace->trace_file_->Flush() != 0) {
          PLOG(WARNING) << "Could not flush trace file.";
        }
      } else {
        the_trace->trace_file_->MarkUnchecked();  // Do not trigger guard.
      }
      if (the_trace->trace_file_->Close() != 0) {
        PLOG(ERROR) << "Could not close trace file.";
      }
    }
    delete the_trace;
  }
  if (stop_alloc_counting) {
    // Can be racy since SetStatsEnabled is not guarded by any locks.
    runtime->SetStatsEnabled(false);
  }
}

void Trace::Abort() {
  // Do not write anything anymore.
  StopTracing(false, false);
}

void Trace::Stop() {
  // Finish writing.
  StopTracing(true, true);
}

void Trace::Shutdown() {
  if (GetMethodTracingMode() != kTracingInactive) {
    Stop();
  }
}

TracingMode Trace::GetMethodTracingMode() {
  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  if (the_trace_ == nullptr) {
    return kTracingInactive;
  } else {
    switch (the_trace_->trace_mode_) {
      case TraceMode::kSampling:
        return kSampleProfilingActive;
      case TraceMode::kMethodTracing:
        return kMethodTracingActive;
    }
    LOG(FATAL) << "Unreachable";
    UNREACHABLE();
  }
}

static constexpr size_t kMinBufSize = 18U;  // Trace header is up to 18B.

Trace::Trace(File* trace_file,
             size_t buffer_size,
             int flags,
             TraceOutputMode output_mode,
             TraceMode trace_mode)
    : trace_file_(trace_file),
      buf_(new uint8_t[std::max(kMinBufSize, buffer_size)]()),
      flags_(flags), trace_output_mode_(output_mode), trace_mode_(trace_mode),
      clock_source_(default_clock_source_),
      buffer_size_(std::max(kMinBufSize, buffer_size)),
      start_time_(MicroTime()), clock_overhead_ns_(GetClockOverheadNanoSeconds()),
      overflow_(false), interval_us_(0), streaming_lock_(nullptr),
      unique_methods_lock_(new Mutex("unique methods lock", kTracingUniqueMethodsLock)) {
  CHECK(trace_file != nullptr || output_mode == TraceOutputMode::kDDMS);

  uint16_t trace_version = GetTraceVersion(clock_source_);
  if (output_mode == TraceOutputMode::kStreaming) {
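    // Streaming output is marked by setting the high nibble of the version field, presumably so
    // readers can distinguish the streaming layout from the regular buffered one.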
    trace_version |= 0xF0U;
  }
  // Set up the beginning of the trace.
  memset(buf_.get(), 0, kTraceHeaderLength);
  Append4LE(buf_.get(), kTraceMagicValue);
  Append2LE(buf_.get() + 4, trace_version);
  Append2LE(buf_.get() + 6, kTraceHeaderLength);
  Append8LE(buf_.get() + 8, start_time_);
  if (trace_version >= kTraceVersionDualClock) {
    uint16_t record_size = GetRecordSize(clock_source_);
    Append2LE(buf_.get() + 16, record_size);
  }
  static_assert(18 <= kMinBufSize, "Minimum buffer size not large enough for trace header");

  cur_offset_.store(kTraceHeaderLength, std::memory_order_relaxed);

  if (output_mode == TraceOutputMode::kStreaming) {
    streaming_lock_ = new Mutex("tracing lock", LockLevel::kTracingStreamingLock);
    seen_threads_.reset(new ThreadIDBitSet());
  }
}

Trace::~Trace() {
  delete streaming_lock_;
  delete unique_methods_lock_;
}

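// Reads a little-endian value of |bytes| bytes starting at |buf|.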
static uint64_t ReadBytes(uint8_t* buf, size_t bytes) {
  uint64_t ret = 0;
  for (size_t i = 0; i < bytes; ++i) {
    ret |= static_cast<uint64_t>(buf[i]) << (i * 8);
  }
  return ret;
}

void Trace::DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source) {
  uint8_t* ptr = buf + kTraceHeaderLength;
  uint8_t* end = buf + buf_size;

  while (ptr < end) {
    uint32_t tmid = ReadBytes(ptr + 2, sizeof(tmid));
    ArtMethod* method = DecodeTraceMethod(tmid);
    TraceAction action = DecodeTraceAction(tmid);
    LOG(INFO) << ArtMethod::PrettyMethod(method) << " " << static_cast<int>(action);
    ptr += GetRecordSize(clock_source);
  }
}

void Trace::FinishTracing() {
  size_t final_offset = 0;
  std::set<ArtMethod*> visited_methods;
  if (trace_output_mode_ == TraceOutputMode::kStreaming) {
    // Clean up.
    MutexLock mu(Thread::Current(), *streaming_lock_);
    STLDeleteValues(&seen_methods_);
  } else {
    final_offset = cur_offset_.load(std::memory_order_relaxed);
    GetVisitedMethods(final_offset, &visited_methods);
  }

  // Compute elapsed time.
  uint64_t elapsed = MicroTime() - start_time_;

  std::ostringstream os;

  os << StringPrintf("%cversion\n", kTraceTokenChar);
  os << StringPrintf("%d\n", GetTraceVersion(clock_source_));
  os << StringPrintf("data-file-overflow=%s\n", overflow_ ? "true" : "false");
  if (UseThreadCpuClock()) {
    if (UseWallClock()) {
      os << StringPrintf("clock=dual\n");
    } else {
      os << StringPrintf("clock=thread-cpu\n");
    }
  } else {
    os << StringPrintf("clock=wall\n");
  }
  os << StringPrintf("elapsed-time-usec=%" PRIu64 "\n", elapsed);
  if (trace_output_mode_ != TraceOutputMode::kStreaming) {
    size_t num_records = (final_offset - kTraceHeaderLength) / GetRecordSize(clock_source_);
    os << StringPrintf("num-method-calls=%zu\n", num_records);
  }
  os << StringPrintf("clock-call-overhead-nsec=%u\n", clock_overhead_ns_);
  os << StringPrintf("vm=art\n");
  os << StringPrintf("pid=%d\n", getpid());
  if ((flags_ & kTraceCountAllocs) != 0) {
    os << StringPrintf("alloc-count=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_OBJECTS));
    os << StringPrintf("alloc-size=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_BYTES));
    os << StringPrintf("gc-count=%d\n", Runtime::Current()->GetStat(KIND_GC_INVOCATIONS));
  }
  os << StringPrintf("%cthreads\n", kTraceTokenChar);
  DumpThreadList(os);
  os << StringPrintf("%cmethods\n", kTraceTokenChar);
  DumpMethodList(os, visited_methods);
  os << StringPrintf("%cend\n", kTraceTokenChar);
  std::string header(os.str());

  if (trace_output_mode_ == TraceOutputMode::kStreaming) {
    // Protect access to buf_ and satisfy sanitizer for calls to WriteToBuf / FlushBuf.
    MutexLock mu(Thread::Current(), *streaming_lock_);
    // Write a special token to mark the end of trace records and the start of
    // trace summary.
    uint8_t buf[7];
    Append2LE(buf, 0);
    buf[2] = kOpTraceSummary;
    Append4LE(buf + 3, static_cast<uint32_t>(header.length()));
    WriteToBuf(buf, sizeof(buf));
    // Write the trace summary. The summary is identical to the file header when
    // the output mode is not streaming (except for methods).
    WriteToBuf(reinterpret_cast<const uint8_t*>(header.c_str()), header.length());
    // Flush the buffer, which may include some trace records before the summary.
    FlushBuf();
  } else {
    if (trace_file_.get() == nullptr) {
      std::vector<uint8_t> data;
      data.resize(header.length() + final_offset);
      memcpy(data.data(), header.c_str(), header.length());
      memcpy(data.data() + header.length(), buf_.get(), final_offset);
      Runtime::Current()->GetRuntimeCallbacks()->DdmPublishChunk(CHUNK_TYPE("MPSE"),
                                                                 ArrayRef<const uint8_t>(data));
      const bool kDumpTraceInfo = false;
      if (kDumpTraceInfo) {
        LOG(INFO) << "Trace sent:\n" << header;
        DumpBuf(buf_.get(), final_offset, clock_source_);
      }
    } else {
      if (!trace_file_->WriteFully(header.c_str(), header.length()) ||
          !trace_file_->WriteFully(buf_.get(), final_offset)) {
        std::string detail(StringPrintf("Trace data write failed: %s", strerror(errno)));
        PLOG(ERROR) << detail;
        ThrowRuntimeException("%s", detail.c_str());
      }
    }
  }
}

void Trace::DexPcMoved(Thread* thread ATTRIBUTE_UNUSED,
                       Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                       ArtMethod* method,
                       uint32_t new_dex_pc) {
  // We're not registered to listen to this kind of event, so complain.
  LOG(ERROR) << "Unexpected dex PC event in tracing " << ArtMethod::PrettyMethod(method)
             << " " << new_dex_pc;
}

void Trace::FieldRead(Thread* thread ATTRIBUTE_UNUSED,
                      Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                      ArtMethod* method,
                      uint32_t dex_pc,
                      ArtField* field ATTRIBUTE_UNUSED)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // We're not registered to listen to this kind of event, so complain.
  LOG(ERROR) << "Unexpected field read event in tracing " << ArtMethod::PrettyMethod(method)
             << " " << dex_pc;
}

void Trace::FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
                         Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                         ArtMethod* method,
                         uint32_t dex_pc,
                         ArtField* field ATTRIBUTE_UNUSED,
                         const JValue& field_value ATTRIBUTE_UNUSED)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // We're not registered to listen to this kind of event, so complain.
  LOG(ERROR) << "Unexpected field write event in tracing " << ArtMethod::PrettyMethod(method)
             << " " << dex_pc;
}

void Trace::MethodEntered(Thread* thread,
                          Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                          ArtMethod* method,
                          uint32_t dex_pc ATTRIBUTE_UNUSED) {
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodEntered,
                      thread_clock_diff, wall_clock_diff);
}

void Trace::MethodExited(Thread* thread,
                         Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                         ArtMethod* method,
                         uint32_t dex_pc ATTRIBUTE_UNUSED,
                         const JValue& return_value ATTRIBUTE_UNUSED) {
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodExited,
                      thread_clock_diff, wall_clock_diff);
}

void Trace::MethodUnwind(Thread* thread,
                         Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                         ArtMethod* method,
                         uint32_t dex_pc ATTRIBUTE_UNUSED) {
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodUnwind,
                      thread_clock_diff, wall_clock_diff);
}

void Trace::ExceptionThrown(Thread* thread ATTRIBUTE_UNUSED,
                            Handle<mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  LOG(ERROR) << "Unexpected exception thrown event in tracing";
}

void Trace::ExceptionHandled(Thread* thread ATTRIBUTE_UNUSED,
                             Handle<mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  LOG(ERROR) << "Unexpected exception handled event in tracing";
}

void Trace::Branch(Thread* /*thread*/, ArtMethod* method,
                   uint32_t /*dex_pc*/, int32_t /*dex_pc_offset*/)
      REQUIRES_SHARED(Locks::mutator_lock_) {
  LOG(ERROR) << "Unexpected branch event in tracing " << ArtMethod::PrettyMethod(method);
}

void Trace::WatchedFramePop(Thread* self ATTRIBUTE_UNUSED,
                            const ShadowFrame& frame ATTRIBUTE_UNUSED) {
  LOG(ERROR) << "Unexpected WatchedFramePop event in tracing";
}

void Trace::ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff) {
  if (UseThreadCpuClock()) {
    uint64_t clock_base = thread->GetTraceClockBase();
    if (UNLIKELY(clock_base == 0)) {
      // First event: record the base time on the thread.
      uint64_t time = thread->GetCpuMicroTime();
      thread->SetTraceClockBase(time);
    } else {
      *thread_clock_diff = thread->GetCpuMicroTime() - clock_base;
    }
  }
  if (UseWallClock()) {
    *wall_clock_diff = MicroTime() - start_time_;
  }
}

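// Returns true the first time |method| is seen, in which case the caller must emit a name record
// for it into the streaming output. RegisterThread(), below, behaves the same way for threads.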
bool Trace::RegisterMethod(ArtMethod* method) {
  const DexFile* dex_file = method->GetDexFile();
  if (seen_methods_.find(dex_file) == seen_methods_.end()) {
    seen_methods_.insert(std::make_pair(dex_file, new DexIndexBitSet()));
  }
  DexIndexBitSet* bit_set = seen_methods_.find(dex_file)->second;
  if (!(*bit_set)[method->GetDexMethodIndex()]) {
    bit_set->set(method->GetDexMethodIndex());
    return true;
  }
  return false;
}

bool Trace::RegisterThread(Thread* thread) {
  pid_t tid = thread->GetTid();
  CHECK_LT(0U, static_cast<uint32_t>(tid));
  CHECK_LT(static_cast<uint32_t>(tid), kMaxThreadIdNumber);

  if (!(*seen_threads_)[tid]) {
    seen_threads_->set(tid);
    return true;
  }
  return false;
}

std::string Trace::GetMethodLine(ArtMethod* method) {
  method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  return StringPrintf("%#x\t%s\t%s\t%s\t%s\n", (EncodeTraceMethod(method) << TraceActionBits),
      PrettyDescriptor(method->GetDeclaringClassDescriptor()).c_str(), method->GetName(),
      method->GetSignature().ToString().c_str(), method->GetDeclaringClassSourceFile());
}

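// Appends |src| to the streaming buffer, flushing the buffer to trace_file_ when it would
// overflow; data larger than the whole buffer is written through directly. Callers must hold
// streaming_lock_.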
void Trace::WriteToBuf(const uint8_t* src, size_t src_size) {
  // Updates to cur_offset_ are done under streaming_lock_ here, since we are in streaming mode.
  int32_t old_offset = cur_offset_.load(std::memory_order_relaxed);
  int32_t new_offset = old_offset + static_cast<int32_t>(src_size);
  if (dchecked_integral_cast<size_t>(new_offset) > buffer_size_) {
    // Flush buffer.
    if (!trace_file_->WriteFully(buf_.get(), old_offset)) {
      PLOG(WARNING) << "Failed streaming a tracing event.";
    }

    // If the data is too large for the buffer, write it to the file directly.
    if (src_size >= buffer_size_) {
      if (!trace_file_->WriteFully(src, src_size)) {
        PLOG(WARNING) << "Failed streaming a tracing event.";
      }
      cur_offset_.store(0, std::memory_order_relaxed);  // Buffer is empty now.
      return;
    }

    old_offset = 0;
    new_offset = static_cast<int32_t>(src_size);
  }
  cur_offset_.store(new_offset, std::memory_order_relaxed);
  // Fill in data.
  memcpy(buf_.get() + old_offset, src, src_size);
}

void Trace::FlushBuf() {
  // Updates to cur_offset_ are done under streaming_lock_ here, since we are in streaming mode.
  int32_t offset = cur_offset_.load(std::memory_order_relaxed);
  if (!trace_file_->WriteFully(buf_.get(), offset)) {
    PLOG(WARNING) << "Failed to flush the remaining data in streaming mode.";
  }
  cur_offset_.store(0, std::memory_order_relaxed);
}


void Trace::LogMethodTraceEvent(Thread* thread, ArtMethod* method,
                                instrumentation::Instrumentation::InstrumentationEvent event,
                                uint32_t thread_clock_diff, uint32_t wall_clock_diff) {
  // This method is called in both tracing modes (method and
  // sampling). In sampling mode, this method is only called by the
  // sampling thread. In method tracing mode, it can be called
  // concurrently.

  // Ensure we always use the non-obsolete version of the method so that entry/exit events have the
  // same pointer value.
  method = method->GetNonObsoleteMethod();

  // Advance cur_offset_ atomically.
  int32_t new_offset;
  int32_t old_offset = 0;

  // In the non-streaming case, we do a busy loop here trying to get
  // an offset to write our record and advance cur_offset_ for the
  // next use.
  if (trace_output_mode_ != TraceOutputMode::kStreaming) {
    // Although multiple threads can call this method concurrently,
    // the compare_exchange_weak here is still atomic (by definition).
    // A successful update is visible to other cores when they pass
    // through this point.
    old_offset = cur_offset_.load(std::memory_order_relaxed);  // Speculative read
    do {
      new_offset = old_offset + GetRecordSize(clock_source_);
      if (static_cast<size_t>(new_offset) > buffer_size_) {
        overflow_ = true;
        return;
      }
    } while (!cur_offset_.compare_exchange_weak(old_offset, new_offset, std::memory_order_relaxed));
  }

  TraceAction action = kTraceMethodEnter;
  switch (event) {
    case instrumentation::Instrumentation::kMethodEntered:
      action = kTraceMethodEnter;
      break;
    case instrumentation::Instrumentation::kMethodExited:
      action = kTraceMethodExit;
      break;
    case instrumentation::Instrumentation::kMethodUnwind:
      action = kTraceUnroll;
      break;
    default:
      UNIMPLEMENTED(FATAL) << "Unexpected event: " << event;
  }

  uint32_t method_value = EncodeTraceMethodAndAction(method, action);

  // Write data into the tracing buffer (if not streaming) or into a
  // small buffer on the stack (if streaming) which we'll put into the
  // tracing buffer below.
  //
  // These writes to the tracing buffer are synchronised with the
  // future reads that (only) occur under FinishTracing(). The callers
  // of FinishTracing() acquire locks and (implicitly) synchronise
  // the buffer memory.
  uint8_t* ptr;
  static constexpr size_t kPacketSize = 14U;  // The maximum size of data in a packet.
  uint8_t stack_buf[kPacketSize];             // Space to store a packet when in streaming mode.
  if (trace_output_mode_ == TraceOutputMode::kStreaming) {
    ptr = stack_buf;
  } else {
    ptr = buf_.get() + old_offset;
  }

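  // Record layout: u2 thread id, u4 packed method id + action, then one u4 clock diff per
  // enabled clock source; cf. kTraceRecordSizeSingleClock / kTraceRecordSizeDualClock.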
  Append2LE(ptr, thread->GetTid());
  Append4LE(ptr + 2, method_value);
  ptr += 6;

  if (UseThreadCpuClock()) {
    Append4LE(ptr, thread_clock_diff);
    ptr += 4;
  }
  if (UseWallClock()) {
    Append4LE(ptr, wall_clock_diff);
  }
  static_assert(kPacketSize == 2 + 4 + 4 + 4, "Packet size incorrect.");

  if (trace_output_mode_ == TraceOutputMode::kStreaming) {
    MutexLock mu(Thread::Current(), *streaming_lock_);  // To serialize writing.
    if (RegisterMethod(method)) {
      // Write a special block with the name.
      std::string method_line(GetMethodLine(method));
      uint8_t buf2[5];
      Append2LE(buf2, 0);
      buf2[2] = kOpNewMethod;
      Append2LE(buf2 + 3, static_cast<uint16_t>(method_line.length()));
      WriteToBuf(buf2, sizeof(buf2));
      WriteToBuf(reinterpret_cast<const uint8_t*>(method_line.c_str()), method_line.length());
    }
    if (RegisterThread(thread)) {
      // It might be better to postpone this. Threads might not have received names...
      std::string thread_name;
      thread->GetThreadName(thread_name);
      uint8_t buf2[7];
      Append2LE(buf2, 0);
      buf2[2] = kOpNewThread;
      Append2LE(buf2 + 3, static_cast<uint16_t>(thread->GetTid()));
      Append2LE(buf2 + 5, static_cast<uint16_t>(thread_name.length()));
      WriteToBuf(buf2, sizeof(buf2));
      WriteToBuf(reinterpret_cast<const uint8_t*>(thread_name.c_str()), thread_name.length());
    }
    WriteToBuf(stack_buf, sizeof(stack_buf));
  }
}

void Trace::GetVisitedMethods(size_t buf_size,
                              std::set<ArtMethod*>* visited_methods) {
  uint8_t* ptr = buf_.get() + kTraceHeaderLength;
  uint8_t* end = buf_.get() + buf_size;

  while (ptr < end) {
    uint32_t tmid = ReadBytes(ptr + 2, sizeof(tmid));
    ArtMethod* method = DecodeTraceMethod(tmid);
    visited_methods->insert(method);
    ptr += GetRecordSize(clock_source_);
  }
}

void Trace::DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods) {
  for (const auto& method : visited_methods) {
    os << GetMethodLine(method);
  }
}

static void DumpThread(Thread* t, void* arg) {
  std::ostream& os = *reinterpret_cast<std::ostream*>(arg);
  std::string name;
  t->GetThreadName(name);
  os << t->GetTid() << "\t" << name << "\n";
}

void Trace::DumpThreadList(std::ostream& os) {
  Thread* self = Thread::Current();
  for (const auto& it : exited_threads_) {
    os << it.first << "\t" << it.second << "\n";
  }
  Locks::thread_list_lock_->AssertNotHeld(self);
  MutexLock mu(self, *Locks::thread_list_lock_);
  Runtime::Current()->GetThreadList()->ForEach(DumpThread, &os);
}

void Trace::StoreExitingThreadInfo(Thread* thread) {
  MutexLock mu(thread, *Locks::trace_lock_);
  if (the_trace_ != nullptr) {
    std::string name;
    thread->GetThreadName(name);
    // The same thread/tid may be used multiple times. As SafeMap::Put does not allow overwriting
    // a previous mapping, use SafeMap::Overwrite instead.
    the_trace_->exited_threads_.Overwrite(thread->GetTid(), name);
  }
}

Trace::TraceOutputMode Trace::GetOutputMode() {
  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  CHECK(the_trace_ != nullptr) << "Trace output mode requested, but no trace currently running";
  return the_trace_->trace_output_mode_;
}

Trace::TraceMode Trace::GetMode() {
  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  CHECK(the_trace_ != nullptr) << "Trace mode requested, but no trace currently running";
  return the_trace_->trace_mode_;
}

size_t Trace::GetBufferSize() {
  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  CHECK(the_trace_ != nullptr) << "Trace buffer size requested, but no trace currently running";
  return the_trace_->buffer_size_;
}

bool Trace::IsTracingEnabled() {
  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  return the_trace_ != nullptr;
}

}  // namespace art