/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "perfetto/ftrace_reader/ftrace_controller.h"

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include <array>
#include <string>
#include <utility>

#include "perfetto/base/build_config.h"
#include "perfetto/base/logging.h"
#include "perfetto/base/time.h"
#include "perfetto/base/utils.h"
#include "src/ftrace_reader/cpu_reader.h"
#include "src/ftrace_reader/cpu_stats_parser.h"
#include "src/ftrace_reader/event_info.h"
#include "src/ftrace_reader/ftrace_config_muxer.h"
#include "src/ftrace_reader/ftrace_procfs.h"
#include "src/ftrace_reader/proto_translation_table.h"

#include "perfetto/trace/ftrace/ftrace_event_bundle.pbzero.h"
#include "perfetto/trace/ftrace/ftrace_stats.pbzero.h"

namespace perfetto {
namespace {

#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
constexpr const char* kTracingPaths[] = {
    "/sys/kernel/tracing/", "/sys/kernel/debug/tracing/", nullptr,
};
#else
constexpr const char* kTracingPaths[] = {
    "/sys/kernel/debug/tracing/", nullptr,
};
#endif

constexpr uint32_t kDefaultDrainPeriodMs = 100;
constexpr uint32_t kMinDrainPeriodMs = 1;
constexpr uint32_t kMaxDrainPeriodMs = 1000 * 60;

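// Clamps the requested drain period to [kMinDrainPeriodMs, kMaxDrainPeriodMs],
// falling back to the default for zero or out-of-range values.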
uint32_t ClampDrainPeriodMs(uint32_t drain_period_ms) {
  if (drain_period_ms == 0) {
    return kDefaultDrainPeriodMs;
  }
  if (drain_period_ms < kMinDrainPeriodMs ||
      kMaxDrainPeriodMs < drain_period_ms) {
    PERFETTO_LOG("drain_period_ms was %u, should be between %u and %u",
                 drain_period_ms, kMinDrainPeriodMs, kMaxDrainPeriodMs);
    return kDefaultDrainPeriodMs;
  }
  return drain_period_ms;
}

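// Best-effort file helpers used by HardResetFtraceState() below: open/write
// failures (e.g. tracefs not mounted at that path) are deliberately ignored.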
void WriteToFile(const char* path, const char* str) {
  int fd = open(path, O_WRONLY);
  if (fd == -1)
    return;
  perfetto::base::ignore_result(write(fd, str, strlen(str)));
  perfetto::base::ignore_result(close(fd));
}

void ClearFile(const char* path) {
  int fd = open(path, O_WRONLY | O_TRUNC);
  if (fd == -1)
    return;
  perfetto::base::ignore_result(close(fd));
}

}  // namespace

// Method of last resort to reset ftrace state.
// We don't know what state the rest of the system and process is in, so
// avoid allocations as far as possible.
void HardResetFtraceState() {
  WriteToFile("/sys/kernel/debug/tracing/tracing_on", "0");
  WriteToFile("/sys/kernel/debug/tracing/buffer_size_kb", "4");
  WriteToFile("/sys/kernel/debug/tracing/events/enable", "0");
  ClearFile("/sys/kernel/debug/tracing/trace");

  WriteToFile("/sys/kernel/tracing/tracing_on", "0");
  WriteToFile("/sys/kernel/tracing/buffer_size_kb", "4");
  WriteToFile("/sys/kernel/tracing/events/enable", "0");
  ClearFile("/sys/kernel/tracing/trace");
}

// static
// TODO(taylori): Add a test for tracing paths in integration tests.
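// Probes the known tracefs mount points in order and builds the controller
// around the first one that works, together with the proto translation table
// and the config muxer.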
std::unique_ptr<FtraceController> FtraceController::Create(
    base::TaskRunner* runner) {
  size_t index = 0;
  std::unique_ptr<FtraceProcfs> ftrace_procfs = nullptr;
  while (!ftrace_procfs && kTracingPaths[index]) {
    ftrace_procfs = FtraceProcfs::Create(kTracingPaths[index++]);
  }

  if (!ftrace_procfs)
    return nullptr;

  auto table = ProtoTranslationTable::Create(
      ftrace_procfs.get(), GetStaticEventInfo(), GetStaticCommonFieldsInfo());

  std::unique_ptr<FtraceConfigMuxer> model = std::unique_ptr<FtraceConfigMuxer>(
      new FtraceConfigMuxer(ftrace_procfs.get(), table.get()));
  return std::unique_ptr<FtraceController>(new FtraceController(
      std::move(ftrace_procfs), std::move(table), std::move(model), runner));
}

FtraceController::FtraceController(std::unique_ptr<FtraceProcfs> ftrace_procfs,
                                   std::unique_ptr<ProtoTranslationTable> table,
                                   std::unique_ptr<FtraceConfigMuxer> model,
                                   base::TaskRunner* task_runner)
    : ftrace_procfs_(std::move(ftrace_procfs)),
      table_(std::move(table)),
      ftrace_config_muxer_(std::move(model)),
      task_runner_(task_runner),
      weak_factory_(this) {}

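// Removes the configs of any sinks that are still registered before tearing
// down the per-cpu readers.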
FtraceController::~FtraceController() {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  for (const auto* sink : sinks_)
    ftrace_config_muxer_->RemoveConfig(sink->id_);
  sinks_.clear();
  StopIfNeeded();
}

uint64_t FtraceController::NowMs() const {
  return static_cast<uint64_t>(base::GetWallTimeMs().count());
}

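// Runs on the main thread (posted from OnDataAvailable()): drains every CPU
// flagged in |cpus_to_drain_|, then posts a task to unblock the per-cpu worker
// threads.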
// static
void FtraceController::DrainCPUs(base::WeakPtr<FtraceController> weak_this,
                                 size_t generation) {
  // The controller might be gone.
  if (!weak_this)
    return;
  // We might have stopped tracing and then quickly re-enabled it; in that
  // case we don't want to end up with two periodic tasks for each CPU:
  if (weak_this->generation_ != generation)
    return;

  PERFETTO_DCHECK_THREAD(weak_this->thread_checker_);
  std::bitset<kMaxCpus> cpus_to_drain;
  {
    std::unique_lock<std::mutex> lock(weak_this->lock_);
    // We might have stopped caring about events.
    if (!weak_this->listening_for_raw_trace_data_)
      return;
    std::swap(cpus_to_drain, weak_this->cpus_to_drain_);
  }

  for (size_t cpu = 0; cpu < weak_this->ftrace_procfs_->NumberOfCpus(); cpu++) {
    if (!cpus_to_drain[cpu])
      continue;
    weak_this->OnRawFtraceDataAvailable(cpu);
  }

  // If we filled up any SHM pages while draining the data, we will have posted
  // a task to notify traced about this. Only unblock the readers after this
  // notification is sent to make it less likely that they steal CPU time away
  // from traced.
  weak_this->task_runner_->PostTask(
      std::bind(&FtraceController::UnblockReaders, weak_this));
}

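// Runs on the main thread after a drain pass; wakes up the per-cpu worker
// threads blocked in OnDataAvailable() so they can resume reading.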
// static
void FtraceController::UnblockReaders(
    const base::WeakPtr<FtraceController>& weak_this) {
  if (!weak_this)
    return;
  // Unblock all waiting readers to start moving more data into their
  // respective staging pipes.
  weak_this->data_drained_.notify_all();
}

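// Called whenever a sink registers. Only the first sink actually starts
// tracing: one CpuReader is created per CPU, each invoking OnDataAvailable()
// on its worker thread when raw ftrace data becomes available.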
void FtraceController::StartIfNeeded() {
  if (sinks_.size() > 1)
    return;
  PERFETTO_CHECK(!sinks_.empty());
  {
    std::unique_lock<std::mutex> lock(lock_);
    PERFETTO_CHECK(!listening_for_raw_trace_data_);
    listening_for_raw_trace_data_ = true;
  }
  generation_++;
  base::WeakPtr<FtraceController> weak_this = weak_factory_.GetWeakPtr();
  for (size_t cpu = 0; cpu < ftrace_procfs_->NumberOfCpus(); cpu++) {
    readers_.emplace(
        cpu, std::unique_ptr<CpuReader>(new CpuReader(
                 table_.get(), cpu, ftrace_procfs_->OpenPipeForCpu(cpu),
                 std::bind(&FtraceController::OnDataAvailable, this, weak_this,
                           generation_, cpu, GetDrainPeriodMs()))));
  }
}

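// The effective drain period is the smallest period requested by any sink,
// clamped to the allowed range.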
uint32_t FtraceController::GetDrainPeriodMs() {
  if (sinks_.empty())
    return kDefaultDrainPeriodMs;
  uint32_t min_drain_period_ms = kMaxDrainPeriodMs + 1;
  for (const FtraceSink* sink : sinks_) {
    if (sink->config().drain_period_ms() < min_drain_period_ms)
      min_drain_period_ms = sink->config().drain_period_ms();
  }
  return ClampDrainPeriodMs(min_drain_period_ms);
}

void FtraceController::ClearTrace() {
  ftrace_procfs_->ClearTrace();
}

void FtraceController::DisableAllEvents() {
  ftrace_procfs_->DisableAllEvents();
}

void FtraceController::WriteTraceMarker(const std::string& s) {
  ftrace_procfs_->WriteTraceMarker(s);
}

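// Called whenever a sink unregisters. Once the last sink is gone, stops
// listening for raw trace data and destroys the per-cpu readers.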
void FtraceController::StopIfNeeded() {
  if (!sinks_.empty())
    return;
  {
    // Unblock any readers that are waiting for us to drain data.
    std::unique_lock<std::mutex> lock(lock_);
    listening_for_raw_trace_data_ = false;
    cpus_to_drain_.reset();
  }
  data_drained_.notify_all();
  readers_.clear();
}

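// Drains one CPU's staging pipe into a per-sink FtraceEventBundle, applying
// each sink's event filter, and notifies every sink once its bundle for this
// CPU is complete.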
void FtraceController::OnRawFtraceDataAvailable(size_t cpu) {
  PERFETTO_CHECK(cpu < ftrace_procfs_->NumberOfCpus());
  CpuReader* reader = readers_[cpu].get();
  using BundleHandle =
      protozero::MessageHandle<protos::pbzero::FtraceEventBundle>;
  std::array<const EventFilter*, kMaxSinks> filters{};
  std::array<BundleHandle, kMaxSinks> bundles{};
  std::array<FtraceMetadata*, kMaxSinks> metadatas{};
  size_t sink_count = sinks_.size();
  size_t i = 0;
  for (FtraceSink* sink : sinks_) {
    filters[i] = sink->event_filter();
    metadatas[i] = sink->metadata_mutable();
    bundles[i++] = sink->GetBundleForCpu(cpu);
  }
  reader->Drain(filters, bundles, metadatas);
  i = 0;
  for (FtraceSink* sink : sinks_)
    sink->OnBundleComplete(cpu, std::move(bundles[i++]));
  PERFETTO_DCHECK(sinks_.size() == sink_count);
}

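// Entry point for clients: validates |config|, registers it with the config
// muxer and returns an FtraceSink that unregisters itself on destruction.
// Returns nullptr if the config is invalid or too many sinks already exist.
// Rough usage sketch (assuming an FtraceSink::Delegate implementation and a
// populated FtraceConfig; how the config is filled in is defined elsewhere):
//   auto sink = controller->CreateSink(std::move(config), &delegate);
//   if (sink) { /* delegate->OnCreate(sink.get()) has already been called */ }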
std::unique_ptr<FtraceSink> FtraceController::CreateSink(
    FtraceConfig config,
    FtraceSink::Delegate* delegate) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  if (sinks_.size() >= kMaxSinks)
    return nullptr;
  if (!ValidConfig(config))
    return nullptr;

  FtraceConfigId id = ftrace_config_muxer_->RequestConfig(config);
  if (!id)
    return nullptr;

  auto controller_weak = weak_factory_.GetWeakPtr();
  auto filter = std::unique_ptr<EventFilter>(new EventFilter(
      *table_, FtraceEventsAsSet(*ftrace_config_muxer_->GetConfig(id))));

  auto sink = std::unique_ptr<FtraceSink>(
      new FtraceSink(std::move(controller_weak), id, std::move(config),
                     std::move(filter), delegate));
  Register(sink.get());
  delegate->OnCreate(sink.get());
  return sink;
}

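// Invoked by a CpuReader (on its worker thread) when new raw ftrace data is
// available for |cpu|. Flags the CPU for draining; if it is the first CPU to
// wake up in this interval, schedules a DrainCPUs() task aligned to the drain
// period, then blocks until the main thread has drained the data.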
void FtraceController::OnDataAvailable(
    base::WeakPtr<FtraceController> weak_this,
    size_t generation,
    size_t cpu,
    uint32_t drain_period_ms) {
  // Called on the worker thread.
  PERFETTO_DCHECK(cpu < ftrace_procfs_->NumberOfCpus());
  std::unique_lock<std::mutex> lock(lock_);
  if (!listening_for_raw_trace_data_)
    return;
  if (cpus_to_drain_.none()) {
    // If this was the first CPU to wake up, schedule a drain for the next
    // drain interval.
    uint32_t delay_ms = drain_period_ms - (NowMs() % drain_period_ms);
    task_runner_->PostDelayedTask(
        std::bind(&FtraceController::DrainCPUs, weak_this, generation),
        delay_ms);
  }
  cpus_to_drain_[cpu] = true;

  // Wait until the main thread has finished draining.
  // TODO(skyostil): The threads waiting here will all try to grab lock_
  // when woken up. Find a way to avoid this.
  data_drained_.wait(lock, [this, cpu] {
    return !cpus_to_drain_[cpu] || !listening_for_raw_trace_data_;
  });
}

void FtraceController::Register(FtraceSink* sink) {
  PERFETTO_DCHECK_THREAD(thread_checker_);
  auto it_and_inserted = sinks_.insert(sink);
  PERFETTO_DCHECK(it_and_inserted.second);
  StartIfNeeded();
}

void FtraceController::Unregister(FtraceSink* sink) {
  PERFETTO_DCHECK_THREAD(thread_checker_);

  size_t removed = sinks_.erase(sink);
  PERFETTO_DCHECK(removed == 1);

  ftrace_config_muxer_->RemoveConfig(sink->id_);

  StopIfNeeded();
}

void FtraceController::DumpFtraceStats(FtraceStats* stats) {
  DumpAllCpuStats(ftrace_procfs_.get(), stats);
}

FtraceSink::FtraceSink(base::WeakPtr<FtraceController> controller_weak,
                       FtraceConfigId id,
                       FtraceConfig config,
                       std::unique_ptr<EventFilter> filter,
                       Delegate* delegate)
    : controller_weak_(std::move(controller_weak)),
      id_(id),
      config_(std::move(config)),
      filter_(std::move(filter)),
      delegate_(delegate) {}

FtraceSink::~FtraceSink() {
  if (controller_weak_)
    controller_weak_->Unregister(this);
}

const std::set<std::string>& FtraceSink::enabled_events() {
  return filter_->enabled_names();
}

void FtraceSink::DumpFtraceStats(FtraceStats* stats) {
  if (controller_weak_)
    controller_weak_->DumpFtraceStats(stats);
}

void FtraceStats::Write(protos::pbzero::FtraceStats* writer) const {
  for (const FtraceCpuStats& cpu_specific_stats : cpu_stats) {
    cpu_specific_stats.Write(writer->add_cpu_stats());
  }
}

void FtraceCpuStats::Write(protos::pbzero::FtraceCpuStats* writer) const {
  writer->set_cpu(cpu);
  writer->set_entries(entries);
  writer->set_overrun(overrun);
  writer->set_commit_overrun(commit_overrun);
  writer->set_bytes_read(bytes_read);
  writer->set_oldest_event_ts(oldest_event_ts);
  writer->set_now_ts(now_ts);
  writer->set_dropped_events(dropped_events);
  writer->set_read_events(read_events);
}

FtraceMetadata::FtraceMetadata() {
  // A lot of the time there will only be a small number of inodes.
  inode_and_device.reserve(10);
  pids.reserve(10);
}

void FtraceMetadata::AddDevice(BlockDeviceID device_id) {
  last_seen_device_id = device_id;
#if PERFETTO_DCHECK_IS_ON()
  seen_device_id = true;
#endif
}

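// Records an (inode, device) pair for the current event. AddDevice() must be
// called first so that the device id is known; the pair is skipped when the
// event was generated by this process itself (our own scanning activity).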
void FtraceMetadata::AddInode(Inode inode_number) {
#if PERFETTO_DCHECK_IS_ON()
  PERFETTO_DCHECK(seen_device_id);
#endif
  static int32_t cached_pid = 0;
  if (!cached_pid)
    cached_pid = getpid();

  PERFETTO_DCHECK(last_seen_common_pid);
  PERFETTO_DCHECK(cached_pid == getpid());
  // Ignore own scanning activity.
  if (cached_pid != last_seen_common_pid) {
    inode_and_device.push_back(
        std::make_pair(inode_number, last_seen_device_id));
  }
}

void FtraceMetadata::AddCommonPid(int32_t pid) {
  last_seen_common_pid = pid;
}

void FtraceMetadata::AddPid(int32_t pid) {
  // Speculative optimization against repeated pids, while keeping insertion
  // faster than a set.
  if (!pids.empty() && pids.back() == pid)
    return;
  pids.push_back(pid);
}

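// Resets the per-event bookkeeping (last seen device id and common pid) once
// an event has been fully processed.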
void FtraceMetadata::FinishEvent() {
  last_seen_device_id = 0;
#if PERFETTO_DCHECK_IS_ON()
  seen_device_id = false;
#endif
  last_seen_common_pid = 0;
}

void FtraceMetadata::Clear() {
  inode_and_device.clear();
  pids.clear();
  overwrite_count = 0;
  FinishEvent();
}

FtraceSink::Delegate::~Delegate() = default;

}  // namespace perfetto