/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "trace.h"

#include <sys/uio.h>
#include <unistd.h>

#include "android-base/stringprintf.h"

#include "art_method-inl.h"
#include "base/casts.h"
#include "base/enums.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "common_throws.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/scoped_gc_critical_section.h"
#include "instrumentation.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "nativehelper/ScopedLocalRef.h"
#include "os.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"

namespace art {

using android::base::StringPrintf;

static constexpr size_t TraceActionBits = MinimumBitsToStore(
    static_cast<size_t>(kTraceMethodActionMask));
static constexpr uint8_t kOpNewMethod = 1U;
static constexpr uint8_t kOpNewThread = 2U;
static constexpr uint8_t kOpTraceSummary = 3U;
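// In streaming mode these opcodes are written as special records: two zero
// bytes (where a thread id would otherwise go), the one-byte opcode, then an
// op-specific payload (see LogMethodTraceEvent and FinishTracing below).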

class BuildStackTraceVisitor : public StackVisitor {
 public:
  explicit BuildStackTraceVisitor(Thread* thread)
      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        method_trace_(Trace::AllocStackTrace()) {}

  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
    ArtMethod* m = GetMethod();
    // Ignore runtime frames (in particular callee save).
    if (!m->IsRuntimeMethod()) {
      method_trace_->push_back(m);
    }
    return true;
  }

  // Returns a stack trace where the topmost frame corresponds with the first element of the vector.
  std::vector<ArtMethod*>* GetStackTrace() const {
    return method_trace_;
  }

 private:
  std::vector<ArtMethod*>* const method_trace_;

  DISALLOW_COPY_AND_ASSIGN(BuildStackTraceVisitor);
};

static const char     kTraceTokenChar             = '*';
static const uint16_t kTraceHeaderLength          = 32;
static const uint32_t kTraceMagicValue            = 0x574f4c53;
static const uint16_t kTraceVersionSingleClock    = 2;
static const uint16_t kTraceVersionDualClock      = 3;
static const uint16_t kTraceRecordSizeSingleClock = 10;  // using v2
static const uint16_t kTraceRecordSizeDualClock   = 14;  // using v3 with two timestamps
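// Record layout: 2 bytes thread id + 4 bytes packed method id/action, plus
// 4 bytes per clock value (one clock for v2, two for v3); see the kPacketSize
// static_assert in LogMethodTraceEvent.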

TraceClockSource Trace::default_clock_source_ = kDefaultTraceClockSource;

Trace* volatile Trace::the_trace_ = nullptr;
pthread_t Trace::sampling_pthread_ = 0U;
std::unique_ptr<std::vector<ArtMethod*>> Trace::temp_stack_trace_;

// The key identifying the tracer to update instrumentation.
static constexpr const char* kTracerInstrumentationKey = "Tracer";

static TraceAction DecodeTraceAction(uint32_t tmid) {
  return static_cast<TraceAction>(tmid & kTraceMethodActionMask);
}

ArtMethod* Trace::DecodeTraceMethod(uint32_t tmid) {
  MutexLock mu(Thread::Current(), *unique_methods_lock_);
  return unique_methods_[tmid >> TraceActionBits];
}

uint32_t Trace::EncodeTraceMethod(ArtMethod* method) {
  MutexLock mu(Thread::Current(), *unique_methods_lock_);
  uint32_t idx;
  auto it = art_method_id_map_.find(method);
  if (it != art_method_id_map_.end()) {
    idx = it->second;
  } else {
    unique_methods_.push_back(method);
    idx = unique_methods_.size() - 1;
    art_method_id_map_.emplace(method, idx);
  }
  DCHECK_LT(idx, unique_methods_.size());
  DCHECK_EQ(unique_methods_[idx], method);
  return idx;
}

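// Packs a per-trace method index and a TraceAction into a single 32-bit id:
// the index goes in the high bits and the action in the low TraceActionBits
// bits, e.g. method index 5 with kTraceMethodExit encodes as
// (5 << TraceActionBits) | kTraceMethodExit.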
uint32_t Trace::EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action) {
  uint32_t tmid = (EncodeTraceMethod(method) << TraceActionBits) | action;
  DCHECK_EQ(method, DecodeTraceMethod(tmid));
  return tmid;
}

std::vector<ArtMethod*>* Trace::AllocStackTrace() {
  return (temp_stack_trace_.get() != nullptr) ? temp_stack_trace_.release() :
      new std::vector<ArtMethod*>();
}

void Trace::FreeStackTrace(std::vector<ArtMethod*>* stack_trace) {
  stack_trace->clear();
  temp_stack_trace_.reset(stack_trace);
}

void Trace::SetDefaultClockSource(TraceClockSource clock_source) {
#if defined(__linux__)
  default_clock_source_ = clock_source;
#else
  if (clock_source != TraceClockSource::kWall) {
    LOG(WARNING) << "Ignoring tracing request to use CPU time.";
  }
#endif
}

static uint16_t GetTraceVersion(TraceClockSource clock_source) {
  return (clock_source == TraceClockSource::kDual) ? kTraceVersionDualClock
                                                   : kTraceVersionSingleClock;
}

static uint16_t GetRecordSize(TraceClockSource clock_source) {
  return (clock_source == TraceClockSource::kDual) ? kTraceRecordSizeDualClock
                                                   : kTraceRecordSizeSingleClock;
}

bool Trace::UseThreadCpuClock() {
  return (clock_source_ == TraceClockSource::kThreadCpu) ||
      (clock_source_ == TraceClockSource::kDual);
}

bool Trace::UseWallClock() {
  return (clock_source_ == TraceClockSource::kWall) ||
      (clock_source_ == TraceClockSource::kDual);
}

void Trace::MeasureClockOverhead() {
  if (UseThreadCpuClock()) {
    Thread::Current()->GetCpuMicroTime();
  }
  if (UseWallClock()) {
    MicroTime();
  }
}

// Compute an average time taken to measure clocks.
uint32_t Trace::GetClockOverheadNanoSeconds() {
  Thread* self = Thread::Current();
  uint64_t start = self->GetCpuMicroTime();

  for (int i = 4000; i > 0; i--) {
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
    MeasureClockOverhead();
  }

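  // 4000 iterations x 8 calls each = 32000 clock reads, so dividing the
  // elapsed microseconds by 32 yields nanoseconds per read:
  // elapsed_us * 1000 / 32000 == elapsed_us / 32.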
  uint64_t elapsed_us = self->GetCpuMicroTime() - start;
  return static_cast<uint32_t>(elapsed_us / 32);
}

// TODO: put this somewhere with the big-endian equivalent used by JDWP.
static void Append2LE(uint8_t* buf, uint16_t val) {
  *buf++ = static_cast<uint8_t>(val);
  *buf++ = static_cast<uint8_t>(val >> 8);
}

// TODO: put this somewhere with the big-endian equivalent used by JDWP.
static void Append4LE(uint8_t* buf, uint32_t val) {
  *buf++ = static_cast<uint8_t>(val);
  *buf++ = static_cast<uint8_t>(val >> 8);
  *buf++ = static_cast<uint8_t>(val >> 16);
  *buf++ = static_cast<uint8_t>(val >> 24);
}

// TODO: put this somewhere with the big-endian equivalent used by JDWP.
static void Append8LE(uint8_t* buf, uint64_t val) {
  *buf++ = static_cast<uint8_t>(val);
  *buf++ = static_cast<uint8_t>(val >> 8);
  *buf++ = static_cast<uint8_t>(val >> 16);
  *buf++ = static_cast<uint8_t>(val >> 24);
  *buf++ = static_cast<uint8_t>(val >> 32);
  *buf++ = static_cast<uint8_t>(val >> 40);
  *buf++ = static_cast<uint8_t>(val >> 48);
  *buf++ = static_cast<uint8_t>(val >> 56);
}
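// These writers emit the least significant byte first, e.g.
// Append4LE(buf, kTraceMagicValue) stores 0x53 0x4c 0x4f 0x57, so the magic
// reads as "SLOW" at the start of the trace file.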

static void GetSample(Thread* thread, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
  BuildStackTraceVisitor build_trace_visitor(thread);
  build_trace_visitor.WalkStack();
  std::vector<ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace();
  Trace* the_trace = reinterpret_cast<Trace*>(arg);
  the_trace->CompareAndUpdateStackTrace(thread, stack_trace);
}

static void ClearThreadStackTraceAndClockBase(Thread* thread, void* arg ATTRIBUTE_UNUSED) {
  thread->SetTraceClockBase(0);
  std::vector<ArtMethod*>* stack_trace = thread->GetStackTraceSample();
  thread->SetStackTraceSample(nullptr);
  delete stack_trace;
}

void Trace::CompareAndUpdateStackTrace(Thread* thread,
                                       std::vector<ArtMethod*>* stack_trace) {
  CHECK_EQ(pthread_self(), sampling_pthread_);
  std::vector<ArtMethod*>* old_stack_trace = thread->GetStackTraceSample();
  // Update the thread's stack trace sample.
  thread->SetStackTraceSample(stack_trace);
  // Read timer clocks to use for all events in this trace.
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  if (old_stack_trace == nullptr) {
    // If there's no previous stack trace sample for this thread, log an entry event for all
    // methods in the trace.
    for (auto rit = stack_trace->rbegin(); rit != stack_trace->rend(); ++rit) {
      LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered,
                          thread_clock_diff, wall_clock_diff);
    }
  } else {
    // If there's a previous stack trace for this thread, diff the traces and emit entry and exit
    // events accordingly.
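    // Example (topmost frame first): old = [bar, foo, main] and
    // new = [baz, foo, main] share [foo, main] from the bottom up, so we emit
    // an exit event for bar followed by an entry event for baz.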
    auto old_rit = old_stack_trace->rbegin();
    auto rit = stack_trace->rbegin();
    // Iterate bottom-up over both traces until there's a difference between them.
    while (old_rit != old_stack_trace->rend() && rit != stack_trace->rend() && *old_rit == *rit) {
      old_rit++;
      rit++;
    }
    // Iterate top-down over the old trace until the point where they differ, emitting exit events.
    for (auto old_it = old_stack_trace->begin(); old_it != old_rit.base(); ++old_it) {
      LogMethodTraceEvent(thread, *old_it, instrumentation::Instrumentation::kMethodExited,
                          thread_clock_diff, wall_clock_diff);
    }
    // Iterate bottom-up over the new trace from the point where they differ, emitting entry events.
    for (; rit != stack_trace->rend(); ++rit) {
      LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered,
                          thread_clock_diff, wall_clock_diff);
    }
    FreeStackTrace(old_stack_trace);
  }
}

void* Trace::RunSamplingThread(void* arg) {
  Runtime* runtime = Runtime::Current();
  intptr_t interval_us = reinterpret_cast<intptr_t>(arg);
  CHECK_GE(interval_us, 0);
  CHECK(runtime->AttachCurrentThread("Sampling Profiler", true, runtime->GetSystemThreadGroup(),
                                     !runtime->IsAotCompiler()));

  while (true) {
    usleep(interval_us);
    ScopedTrace trace("Profile sampling");
    Thread* self = Thread::Current();
    Trace* the_trace;
    {
      MutexLock mu(self, *Locks::trace_lock_);
      the_trace = the_trace_;
      if (the_trace == nullptr) {
        break;
      }
    }
    {
      ScopedSuspendAll ssa(__FUNCTION__);
      MutexLock mu(self, *Locks::thread_list_lock_);
      runtime->GetThreadList()->ForEach(GetSample, the_trace);
    }
  }

  runtime->DetachCurrentThread();
  return nullptr;
}

void Trace::Start(const char* trace_filename, int trace_fd, size_t buffer_size, int flags,
                  TraceOutputMode output_mode, TraceMode trace_mode, int interval_us) {
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::trace_lock_);
    if (the_trace_ != nullptr) {
      LOG(ERROR) << "Trace already in progress, ignoring this request";
      return;
    }
  }

  // Check interval if sampling is enabled
  if (trace_mode == TraceMode::kSampling && interval_us <= 0) {
    LOG(ERROR) << "Invalid sampling interval: " << interval_us;
    ScopedObjectAccess soa(self);
    ThrowRuntimeException("Invalid sampling interval: %d", interval_us);
    return;
  }

  // Open trace file if not going directly to ddms.
  std::unique_ptr<File> trace_file;
  if (output_mode != TraceOutputMode::kDDMS) {
    if (trace_fd < 0) {
      trace_file.reset(OS::CreateEmptyFileWriteOnly(trace_filename));
    } else {
      trace_file.reset(new File(trace_fd, "tracefile"));
      trace_file->DisableAutoClose();
    }
    if (trace_file.get() == nullptr) {
      PLOG(ERROR) << "Unable to open trace file '" << trace_filename << "'";
      ScopedObjectAccess soa(self);
      ThrowRuntimeException("Unable to open trace file '%s'", trace_filename);
      return;
    }
  }

  Runtime* runtime = Runtime::Current();

  // Enable count of allocs if specified in the flags.
  bool enable_stats = false;

  // Create Trace object.
  {
    // Required since EnableMethodTracing calls ConfigureStubs which visits class linker classes.
    gc::ScopedGCCriticalSection gcs(self,
                                    gc::kGcCauseInstrumentation,
                                    gc::kCollectorTypeInstrumentation);
    ScopedSuspendAll ssa(__FUNCTION__);
    MutexLock mu(self, *Locks::trace_lock_);
    if (the_trace_ != nullptr) {
      LOG(ERROR) << "Trace already in progress, ignoring this request";
    } else {
      enable_stats = (flags & kTraceCountAllocs) != 0;
      the_trace_ = new Trace(trace_file.release(), trace_filename, buffer_size, flags, output_mode,
                             trace_mode);
      if (trace_mode == TraceMode::kSampling) {
        CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, nullptr, &RunSamplingThread,
                                            reinterpret_cast<void*>(interval_us)),
                                            "Sampling profiler thread");
        the_trace_->interval_us_ = interval_us;
      } else {
        runtime->GetInstrumentation()->AddListener(the_trace_,
                                                   instrumentation::Instrumentation::kMethodEntered |
                                                   instrumentation::Instrumentation::kMethodExited |
                                                   instrumentation::Instrumentation::kMethodUnwind);
        // TODO: In full-PIC mode, we don't need to fully deopt.
        runtime->GetInstrumentation()->EnableMethodTracing(kTracerInstrumentationKey);
      }
    }
  }

  // Can't call this when holding the mutator lock.
  if (enable_stats) {
    runtime->SetStatsEnabled(true);
  }
}

void Trace::StopTracing(bool finish_tracing, bool flush_file) {
  bool stop_alloc_counting = false;
  Runtime* const runtime = Runtime::Current();
  Trace* the_trace = nullptr;
  Thread* const self = Thread::Current();
  pthread_t sampling_pthread = 0U;
  {
    MutexLock mu(self, *Locks::trace_lock_);
    if (the_trace_ == nullptr) {
      LOG(ERROR) << "Trace stop requested, but no trace currently running";
    } else {
      the_trace = the_trace_;
      the_trace_ = nullptr;
      sampling_pthread = sampling_pthread_;
    }
  }
  // Make sure that we join before we delete the trace since we don't want to have
  // the sampling thread access a stale pointer. This finishes since the sampling thread exits when
  // the_trace_ is null.
  if (sampling_pthread != 0U) {
    CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, nullptr), "sampling thread shutdown");
    sampling_pthread_ = 0U;
  }

  {
    gc::ScopedGCCriticalSection gcs(self,
                                    gc::kGcCauseInstrumentation,
                                    gc::kCollectorTypeInstrumentation);
    ScopedSuspendAll ssa(__FUNCTION__);
    if (the_trace != nullptr) {
      stop_alloc_counting = (the_trace->flags_ & Trace::kTraceCountAllocs) != 0;
      if (finish_tracing) {
        the_trace->FinishTracing();
      }

      if (the_trace->trace_mode_ == TraceMode::kSampling) {
        MutexLock mu(self, *Locks::thread_list_lock_);
        runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
      } else {
        runtime->GetInstrumentation()->DisableMethodTracing(kTracerInstrumentationKey);
        runtime->GetInstrumentation()->RemoveListener(
            the_trace, instrumentation::Instrumentation::kMethodEntered |
            instrumentation::Instrumentation::kMethodExited |
            instrumentation::Instrumentation::kMethodUnwind);
      }
      if (the_trace->trace_file_.get() != nullptr) {
        // Do not try to erase, so flush and close explicitly.
        if (flush_file) {
          if (the_trace->trace_file_->Flush() != 0) {
            PLOG(WARNING) << "Could not flush trace file.";
          }
        } else {
          the_trace->trace_file_->MarkUnchecked();  // Do not trigger guard.
        }
        if (the_trace->trace_file_->Close() != 0) {
          PLOG(ERROR) << "Could not close trace file.";
        }
      }
      delete the_trace;
    }
  }
  if (stop_alloc_counting) {
    // Can be racy since SetStatsEnabled is not guarded by any locks.
    runtime->SetStatsEnabled(false);
  }
}

void Trace::Abort() {
  // Do not write anything anymore.
  StopTracing(false, false);
}

void Trace::Stop() {
  // Finish writing.
  StopTracing(true, true);
}

void Trace::Shutdown() {
  if (GetMethodTracingMode() != kTracingInactive) {
    Stop();
  }
}

void Trace::Pause() {
  bool stop_alloc_counting = false;
  Runtime* runtime = Runtime::Current();
  Trace* the_trace = nullptr;

  Thread* const self = Thread::Current();
  pthread_t sampling_pthread = 0U;
  {
    MutexLock mu(self, *Locks::trace_lock_);
    if (the_trace_ == nullptr) {
      LOG(ERROR) << "Trace pause requested, but no trace currently running";
      return;
    } else {
      the_trace = the_trace_;
      sampling_pthread = sampling_pthread_;
    }
  }

  if (sampling_pthread != 0U) {
    {
      MutexLock mu(self, *Locks::trace_lock_);
      the_trace_ = nullptr;
    }
    CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, nullptr), "sampling thread shutdown");
    sampling_pthread_ = 0U;
    {
      MutexLock mu(self, *Locks::trace_lock_);
      the_trace_ = the_trace;
    }
  }

  if (the_trace != nullptr) {
    gc::ScopedGCCriticalSection gcs(self,
                                    gc::kGcCauseInstrumentation,
                                    gc::kCollectorTypeInstrumentation);
    ScopedSuspendAll ssa(__FUNCTION__);
    stop_alloc_counting = (the_trace->flags_ & Trace::kTraceCountAllocs) != 0;

    if (the_trace->trace_mode_ == TraceMode::kSampling) {
      MutexLock mu(self, *Locks::thread_list_lock_);
      runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
    } else {
      runtime->GetInstrumentation()->DisableMethodTracing(kTracerInstrumentationKey);
      runtime->GetInstrumentation()->RemoveListener(
          the_trace,
          instrumentation::Instrumentation::kMethodEntered |
          instrumentation::Instrumentation::kMethodExited |
          instrumentation::Instrumentation::kMethodUnwind);
    }
  }

  if (stop_alloc_counting) {
    // Can be racy since SetStatsEnabled is not guarded by any locks.
    Runtime::Current()->SetStatsEnabled(false);
  }
}

void Trace::Resume() {
  Thread* self = Thread::Current();
  Trace* the_trace;
  {
    MutexLock mu(self, *Locks::trace_lock_);
    if (the_trace_ == nullptr) {
      LOG(ERROR) << "No trace to resume (or sampling mode), ignoring this request";
      return;
    }
    the_trace = the_trace_;
  }

  Runtime* runtime = Runtime::Current();

  // Enable count of allocs if specified in the flags.
  bool enable_stats = (the_trace->flags_ & kTraceCountAllocs) != 0;

  {
    gc::ScopedGCCriticalSection gcs(self,
                                    gc::kGcCauseInstrumentation,
                                    gc::kCollectorTypeInstrumentation);
    ScopedSuspendAll ssa(__FUNCTION__);

    // Reenable.
    if (the_trace->trace_mode_ == TraceMode::kSampling) {
      CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, nullptr, &RunSamplingThread,
          reinterpret_cast<void*>(the_trace->interval_us_)), "Sampling profiler thread");
    } else {
      runtime->GetInstrumentation()->AddListener(the_trace,
                                                 instrumentation::Instrumentation::kMethodEntered |
                                                 instrumentation::Instrumentation::kMethodExited |
                                                 instrumentation::Instrumentation::kMethodUnwind);
      // TODO: In full-PIC mode, we don't need to fully deopt.
      runtime->GetInstrumentation()->EnableMethodTracing(kTracerInstrumentationKey);
    }
  }

  // Can't call this when holding the mutator lock.
  if (enable_stats) {
    runtime->SetStatsEnabled(true);
  }
}

TracingMode Trace::GetMethodTracingMode() {
  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  if (the_trace_ == nullptr) {
    return kTracingInactive;
  } else {
    switch (the_trace_->trace_mode_) {
      case TraceMode::kSampling:
        return kSampleProfilingActive;
      case TraceMode::kMethodTracing:
        return kMethodTracingActive;
    }
    LOG(FATAL) << "Unreachable";
    UNREACHABLE();
  }
}

static constexpr size_t kMinBufSize = 18U;  // Trace header is up to 18B.

Trace::Trace(File* trace_file, const char* trace_name, size_t buffer_size, int flags,
             TraceOutputMode output_mode, TraceMode trace_mode)
    : trace_file_(trace_file),
      buf_(new uint8_t[std::max(kMinBufSize, buffer_size)]()),
      flags_(flags), trace_output_mode_(output_mode), trace_mode_(trace_mode),
      clock_source_(default_clock_source_),
      buffer_size_(std::max(kMinBufSize, buffer_size)),
      start_time_(MicroTime()), clock_overhead_ns_(GetClockOverheadNanoSeconds()), cur_offset_(0),
      overflow_(false), interval_us_(0), streaming_lock_(nullptr),
      unique_methods_lock_(new Mutex("unique methods lock", kTracingUniqueMethodsLock)) {
  uint16_t trace_version = GetTraceVersion(clock_source_);
  if (output_mode == TraceOutputMode::kStreaming) {
    trace_version |= 0xF0U;
  }
  // Set up the beginning of the trace.
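  // Header layout (all values little-endian):
  //   offset 0: magic (4 bytes), 4: version (2), 6: header length (2),
  //   8: start time in usec (8), and for v3+ (dual-clock) traces,
  //   16: record size (2).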
  memset(buf_.get(), 0, kTraceHeaderLength);
  Append4LE(buf_.get(), kTraceMagicValue);
  Append2LE(buf_.get() + 4, trace_version);
  Append2LE(buf_.get() + 6, kTraceHeaderLength);
  Append8LE(buf_.get() + 8, start_time_);
  if (trace_version >= kTraceVersionDualClock) {
    uint16_t record_size = GetRecordSize(clock_source_);
    Append2LE(buf_.get() + 16, record_size);
  }
  static_assert(18 <= kMinBufSize, "Minimum buffer size not large enough for trace header");

  // Update current offset.
  cur_offset_.StoreRelaxed(kTraceHeaderLength);

  if (output_mode == TraceOutputMode::kStreaming) {
    streaming_file_name_ = trace_name;
    streaming_lock_ = new Mutex("tracing lock", LockLevel::kTracingStreamingLock);
    seen_threads_.reset(new ThreadIDBitSet());
  }
}

Trace::~Trace() {
  delete streaming_lock_;
  delete unique_methods_lock_;
}

static uint64_t ReadBytes(uint8_t* buf, size_t bytes) {
  uint64_t ret = 0;
  for (size_t i = 0; i < bytes; ++i) {
    ret |= static_cast<uint64_t>(buf[i]) << (i * 8);
  }
  return ret;
}
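// The read-side inverse of the AppendNLE writers above, e.g. ReadBytes(p, 4)
// reassembles the uint32_t that Append4LE(p, v) wrote.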

void Trace::DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source) {
  uint8_t* ptr = buf + kTraceHeaderLength;
  uint8_t* end = buf + buf_size;

  while (ptr < end) {
    uint32_t tmid = ReadBytes(ptr + 2, sizeof(tmid));
    ArtMethod* method = DecodeTraceMethod(tmid);
    TraceAction action = DecodeTraceAction(tmid);
    LOG(INFO) << ArtMethod::PrettyMethod(method) << " " << static_cast<int>(action);
    ptr += GetRecordSize(clock_source);
  }
}

void Trace::FinishTracing() {
  size_t final_offset = 0;

  std::set<ArtMethod*> visited_methods;
  if (trace_output_mode_ == TraceOutputMode::kStreaming) {
    // Clean up.
    STLDeleteValues(&seen_methods_);
  } else {
    final_offset = cur_offset_.LoadRelaxed();
    GetVisitedMethods(final_offset, &visited_methods);
  }

  // Compute elapsed time.
  uint64_t elapsed = MicroTime() - start_time_;

  std::ostringstream os;

  os << StringPrintf("%cversion\n", kTraceTokenChar);
  os << StringPrintf("%d\n", GetTraceVersion(clock_source_));
  os << StringPrintf("data-file-overflow=%s\n", overflow_ ? "true" : "false");
  if (UseThreadCpuClock()) {
    if (UseWallClock()) {
      os << StringPrintf("clock=dual\n");
    } else {
      os << StringPrintf("clock=thread-cpu\n");
    }
  } else {
    os << StringPrintf("clock=wall\n");
  }
  os << StringPrintf("elapsed-time-usec=%" PRIu64 "\n", elapsed);
  if (trace_output_mode_ != TraceOutputMode::kStreaming) {
    size_t num_records = (final_offset - kTraceHeaderLength) / GetRecordSize(clock_source_);
    os << StringPrintf("num-method-calls=%zd\n", num_records);
  }
  os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns_);
  os << StringPrintf("vm=art\n");
  os << StringPrintf("pid=%d\n", getpid());
  if ((flags_ & kTraceCountAllocs) != 0) {
    os << StringPrintf("alloc-count=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_OBJECTS));
    os << StringPrintf("alloc-size=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_BYTES));
    os << StringPrintf("gc-count=%d\n", Runtime::Current()->GetStat(KIND_GC_INVOCATIONS));
  }
  os << StringPrintf("%cthreads\n", kTraceTokenChar);
  DumpThreadList(os);
  os << StringPrintf("%cmethods\n", kTraceTokenChar);
  DumpMethodList(os, visited_methods);
  os << StringPrintf("%cend\n", kTraceTokenChar);
  std::string header(os.str());

  if (trace_output_mode_ == TraceOutputMode::kStreaming) {
    MutexLock mu(Thread::Current(), *streaming_lock_);  // To serialize writing.
    // Write a special token to mark the end of trace records and the start of
    // trace summary.
    uint8_t buf[7];
    Append2LE(buf, 0);
    buf[2] = kOpTraceSummary;
    Append4LE(buf + 3, static_cast<uint32_t>(header.length()));
    WriteToBuf(buf, sizeof(buf));
    // Write the trace summary. The summary is identical to the file header when
    // the output mode is not streaming (except for methods).
    WriteToBuf(reinterpret_cast<const uint8_t*>(header.c_str()), header.length());
    // Flush the buffer, which may include some trace records before the summary.
    FlushBuf();
  } else {
    if (trace_file_.get() == nullptr) {
      iovec iov[2];
      iov[0].iov_base = reinterpret_cast<void*>(const_cast<char*>(header.c_str()));
      iov[0].iov_len = header.length();
      iov[1].iov_base = buf_.get();
      iov[1].iov_len = final_offset;
      Dbg::DdmSendChunkV(CHUNK_TYPE("MPSE"), iov, 2);
      const bool kDumpTraceInfo = false;
      if (kDumpTraceInfo) {
        LOG(INFO) << "Trace sent:\n" << header;
        DumpBuf(buf_.get(), final_offset, clock_source_);
      }
    } else {
      if (!trace_file_->WriteFully(header.c_str(), header.length()) ||
          !trace_file_->WriteFully(buf_.get(), final_offset)) {
        std::string detail(StringPrintf("Trace data write failed: %s", strerror(errno)));
        PLOG(ERROR) << detail;
        ThrowRuntimeException("%s", detail.c_str());
      }
    }
  }
}

void Trace::DexPcMoved(Thread* thread ATTRIBUTE_UNUSED,
                       Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                       ArtMethod* method,
                       uint32_t new_dex_pc) {
  // We're not registered to listen to this kind of event, so complain.
  LOG(ERROR) << "Unexpected dex PC event in tracing " << ArtMethod::PrettyMethod(method)
             << " " << new_dex_pc;
}

void Trace::FieldRead(Thread* thread ATTRIBUTE_UNUSED,
                      Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                      ArtMethod* method,
                      uint32_t dex_pc,
                      ArtField* field ATTRIBUTE_UNUSED)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // We're not registered to listen to this kind of event, so complain.
  LOG(ERROR) << "Unexpected field read event in tracing " << ArtMethod::PrettyMethod(method)
             << " " << dex_pc;
}

void Trace::FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
                         Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                         ArtMethod* method,
                         uint32_t dex_pc,
                         ArtField* field ATTRIBUTE_UNUSED,
                         const JValue& field_value ATTRIBUTE_UNUSED)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // We're not registered to listen to this kind of event, so complain.
  LOG(ERROR) << "Unexpected field write event in tracing " << ArtMethod::PrettyMethod(method)
             << " " << dex_pc;
}

void Trace::MethodEntered(Thread* thread,
                          Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                          ArtMethod* method,
                          uint32_t dex_pc ATTRIBUTE_UNUSED) {
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodEntered,
                      thread_clock_diff, wall_clock_diff);
}

void Trace::MethodExited(Thread* thread,
                         Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                         ArtMethod* method,
                         uint32_t dex_pc ATTRIBUTE_UNUSED,
                         const JValue& return_value ATTRIBUTE_UNUSED) {
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodExited,
                      thread_clock_diff, wall_clock_diff);
}

void Trace::MethodUnwind(Thread* thread,
                         Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                         ArtMethod* method,
                         uint32_t dex_pc ATTRIBUTE_UNUSED) {
  uint32_t thread_clock_diff = 0;
  uint32_t wall_clock_diff = 0;
  ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodUnwind,
                      thread_clock_diff, wall_clock_diff);
}

void Trace::ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED,
                            Handle<mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  LOG(ERROR) << "Unexpected exception caught event in tracing";
}

void Trace::Branch(Thread* /*thread*/, ArtMethod* method,
                   uint32_t /*dex_pc*/, int32_t /*dex_pc_offset*/)
      REQUIRES_SHARED(Locks::mutator_lock_) {
  LOG(ERROR) << "Unexpected branch event in tracing " << ArtMethod::PrettyMethod(method);
}

void Trace::InvokeVirtualOrInterface(Thread*,
                                     Handle<mirror::Object>,
                                     ArtMethod* method,
                                     uint32_t dex_pc,
                                     ArtMethod*) {
  LOG(ERROR) << "Unexpected invoke event in tracing " << ArtMethod::PrettyMethod(method)
             << " " << dex_pc;
}

void Trace::ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff) {
  if (UseThreadCpuClock()) {
    uint64_t clock_base = thread->GetTraceClockBase();
    if (UNLIKELY(clock_base == 0)) {
      // First event, record the base time in the map.
      uint64_t time = thread->GetCpuMicroTime();
      thread->SetTraceClockBase(time);
    } else {
      *thread_clock_diff = thread->GetCpuMicroTime() - clock_base;
    }
  }
  if (UseWallClock()) {
    *wall_clock_diff = MicroTime() - start_time_;
  }
}

bool Trace::RegisterMethod(ArtMethod* method) {
  mirror::DexCache* dex_cache = method->GetDexCache();
  const DexFile* dex_file = dex_cache->GetDexFile();
  if (seen_methods_.find(dex_file) == seen_methods_.end()) {
    seen_methods_.insert(std::make_pair(dex_file, new DexIndexBitSet()));
  }
  DexIndexBitSet* bit_set = seen_methods_.find(dex_file)->second;
  if (!(*bit_set)[method->GetDexMethodIndex()]) {
    bit_set->set(method->GetDexMethodIndex());
    return true;
  }
  return false;
}

bool Trace::RegisterThread(Thread* thread) {
  pid_t tid = thread->GetTid();
  CHECK_LT(0U, static_cast<uint32_t>(tid));
  CHECK_LT(static_cast<uint32_t>(tid), kMaxThreadIdNumber);

  if (!(*seen_threads_)[tid]) {
    seen_threads_->set(tid);
    return true;
  }
  return false;
}

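// Emits one tab-separated line per method, matching the *methods section of
// the trace header:
// <hex id (method index << TraceActionBits)>\t<class>\t<name>\t<signature>\t<source file>.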
std::string Trace::GetMethodLine(ArtMethod* method) {
  method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
  return StringPrintf("%#x\t%s\t%s\t%s\t%s\n", (EncodeTraceMethod(method) << TraceActionBits),
      PrettyDescriptor(method->GetDeclaringClassDescriptor()).c_str(), method->GetName(),
      method->GetSignature().ToString().c_str(), method->GetDeclaringClassSourceFile());
}

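// Only used in streaming mode. Both callers (FinishTracing and
// LogMethodTraceEvent) hold streaming_lock_, which serializes the buffer
// writes below.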
void Trace::WriteToBuf(const uint8_t* src, size_t src_size) {
  int32_t old_offset = cur_offset_.LoadRelaxed();
  int32_t new_offset = old_offset + static_cast<int32_t>(src_size);
  if (dchecked_integral_cast<size_t>(new_offset) > buffer_size_) {
    // Flush buffer.
    if (!trace_file_->WriteFully(buf_.get(), old_offset)) {
      PLOG(WARNING) << "Failed streaming a tracing event.";
    }

    // If the data is too large for the buffer even when empty, write it to the file directly.
    if (src_size >= buffer_size_) {
      if (!trace_file_->WriteFully(src, src_size)) {
        PLOG(WARNING) << "Failed streaming a tracing event.";
      }
      cur_offset_.StoreRelease(0);  // Buffer is empty now.
      return;
    }

    old_offset = 0;
    new_offset = static_cast<int32_t>(src_size);
  }
  cur_offset_.StoreRelease(new_offset);
  // Fill in data.
  memcpy(buf_.get() + old_offset, src, src_size);
}

void Trace::FlushBuf() {
  int32_t offset = cur_offset_.LoadRelaxed();
  if (!trace_file_->WriteFully(buf_.get(), offset)) {
    PLOG(WARNING) << "Failed to flush the remaining data in streaming mode.";
  }
  cur_offset_.StoreRelease(0);
}

void Trace::LogMethodTraceEvent(Thread* thread, ArtMethod* method,
                                instrumentation::Instrumentation::InstrumentationEvent event,
                                uint32_t thread_clock_diff, uint32_t wall_clock_diff) {
  // Ensure we always use the non-obsolete version of the method so that entry/exit events have the
  // same pointer value.
  method = method->GetNonObsoleteMethod();
  // Advance cur_offset_ atomically.
  int32_t new_offset;
  int32_t old_offset = 0;

  // We do a busy loop here trying to acquire the next offset.
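  // Each writer reserves a record slot with a weak compare-and-swap on
  // cur_offset_, retrying on contention. If the record would overrun the
  // buffer, the event is dropped and overflow_ is flagged so the header can
  // report data-file-overflow=true.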
  if (trace_output_mode_ != TraceOutputMode::kStreaming) {
    do {
      old_offset = cur_offset_.LoadRelaxed();
      new_offset = old_offset + GetRecordSize(clock_source_);
      if (static_cast<size_t>(new_offset) > buffer_size_) {
        overflow_ = true;
        return;
      }
    } while (!cur_offset_.CompareExchangeWeakSequentiallyConsistent(old_offset, new_offset));
  }

  TraceAction action = kTraceMethodEnter;
  switch (event) {
    case instrumentation::Instrumentation::kMethodEntered:
      action = kTraceMethodEnter;
      break;
    case instrumentation::Instrumentation::kMethodExited:
      action = kTraceMethodExit;
      break;
    case instrumentation::Instrumentation::kMethodUnwind:
      action = kTraceUnroll;
      break;
    default:
      UNIMPLEMENTED(FATAL) << "Unexpected event: " << event;
  }

  uint32_t method_value = EncodeTraceMethodAndAction(method, action);

  // Write data
  uint8_t* ptr;
  static constexpr size_t kPacketSize = 14U;  // The maximum size of data in a packet.
  uint8_t stack_buf[kPacketSize];             // Space to store a packet when in streaming mode.
  if (trace_output_mode_ == TraceOutputMode::kStreaming) {
    ptr = stack_buf;
  } else {
    ptr = buf_.get() + old_offset;
  }

  Append2LE(ptr, thread->GetTid());
  Append4LE(ptr + 2, method_value);
  ptr += 6;

  if (UseThreadCpuClock()) {
    Append4LE(ptr, thread_clock_diff);
    ptr += 4;
  }
  if (UseWallClock()) {
    Append4LE(ptr, wall_clock_diff);
  }
  static_assert(kPacketSize == 2 + 4 + 4 + 4, "Packet size incorrect.");

  if (trace_output_mode_ == TraceOutputMode::kStreaming) {
    MutexLock mu(Thread::Current(), *streaming_lock_);  // To serialize writing.
    if (RegisterMethod(method)) {
      // Write a special block with the name.
      std::string method_line(GetMethodLine(method));
      uint8_t buf2[5];
      Append2LE(buf2, 0);
      buf2[2] = kOpNewMethod;
      Append2LE(buf2 + 3, static_cast<uint16_t>(method_line.length()));
      WriteToBuf(buf2, sizeof(buf2));
      WriteToBuf(reinterpret_cast<const uint8_t*>(method_line.c_str()), method_line.length());
    }
    if (RegisterThread(thread)) {
      // It might be better to postpone this. Threads might not have received names...
      std::string thread_name;
      thread->GetThreadName(thread_name);
      uint8_t buf2[7];
      Append2LE(buf2, 0);
      buf2[2] = kOpNewThread;
      Append2LE(buf2 + 3, static_cast<uint16_t>(thread->GetTid()));
      Append2LE(buf2 + 5, static_cast<uint16_t>(thread_name.length()));
      WriteToBuf(buf2, sizeof(buf2));
      WriteToBuf(reinterpret_cast<const uint8_t*>(thread_name.c_str()), thread_name.length());
    }
    WriteToBuf(stack_buf, sizeof(stack_buf));
  }
}

void Trace::GetVisitedMethods(size_t buf_size,
                              std::set<ArtMethod*>* visited_methods) {
  uint8_t* ptr = buf_.get() + kTraceHeaderLength;
  uint8_t* end = buf_.get() + buf_size;

  while (ptr < end) {
    uint32_t tmid = ReadBytes(ptr + 2, sizeof(tmid));
    ArtMethod* method = DecodeTraceMethod(tmid);
    visited_methods->insert(method);
    ptr += GetRecordSize(clock_source_);
  }
}

void Trace::DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods) {
  for (const auto& method : visited_methods) {
    os << GetMethodLine(method);
  }
}

static void DumpThread(Thread* t, void* arg) {
  std::ostream& os = *reinterpret_cast<std::ostream*>(arg);
  std::string name;
  t->GetThreadName(name);
  os << t->GetTid() << "\t" << name << "\n";
}

void Trace::DumpThreadList(std::ostream& os) {
  Thread* self = Thread::Current();
  for (auto it : exited_threads_) {
    os << it.first << "\t" << it.second << "\n";
  }
  Locks::thread_list_lock_->AssertNotHeld(self);
  MutexLock mu(self, *Locks::thread_list_lock_);
  Runtime::Current()->GetThreadList()->ForEach(DumpThread, &os);
}

void Trace::StoreExitingThreadInfo(Thread* thread) {
  MutexLock mu(thread, *Locks::trace_lock_);
  if (the_trace_ != nullptr) {
    std::string name;
    thread->GetThreadName(name);
    // The same thread/tid may be used multiple times. As SafeMap::Put does not allow overwriting
    // a previous mapping, use SafeMap::Overwrite instead.
    the_trace_->exited_threads_.Overwrite(thread->GetTid(), name);
  }
}

Trace::TraceOutputMode Trace::GetOutputMode() {
  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  CHECK(the_trace_ != nullptr) << "Trace output mode requested, but no trace currently running";
  return the_trace_->trace_output_mode_;
}

Trace::TraceMode Trace::GetMode() {
  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  CHECK(the_trace_ != nullptr) << "Trace mode requested, but no trace currently running";
  return the_trace_->trace_mode_;
}

size_t Trace::GetBufferSize() {
  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  CHECK(the_trace_ != nullptr) << "Trace buffer size requested, but no trace currently running";
  return the_trace_->buffer_size_;
}

bool Trace::IsTracingEnabled() {
  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  return the_trace_ != nullptr;
}

}  // namespace art