/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define ATRACE_TAG ATRACE_TAG_ALWAYS
#include "event_fd.h"

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <atomic>
#include <memory>
#include <cutils/trace.h>
#include <utils/Trace.h>

#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/stringprintf.h>

#include "environment.h"
#include "event_attr.h"
#include "event_type.h"
#include "perf_event.h"
#include "utils.h"

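// perf_event_open() has no libc wrapper, so invoke it directly through
// syscall(2). The returned fd refers to one event measured on the given
// (pid, cpu) pair.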
static int perf_event_open(const perf_event_attr& attr, pid_t pid, int cpu,
                           int group_fd, unsigned long flags) {  // NOLINT
  return syscall(__NR_perf_event_open, &attr, pid, cpu, group_fd, flags);
}

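// Open a perf event file for `attr` on thread `tid` and cpu `cpu`. If
// group_event_fd is not null, the new event joins that event's group, so the
// kernel schedules the whole group onto the PMU together. Returns nullptr on
// failure.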
std::unique_ptr<EventFd> EventFd::OpenEventFile(const perf_event_attr& attr,
                                                pid_t tid, int cpu,
                                                EventFd* group_event_fd,
                                                bool report_error) {
  std::string event_name = GetEventNameByAttr(attr);
  int group_fd = -1;
  if (group_event_fd != nullptr) {
    group_fd = group_event_fd->perf_event_fd_;
  }
  perf_event_attr real_attr = attr;
  if (attr.freq) {
    uint64_t max_sample_freq;
    if (GetMaxSampleFrequency(&max_sample_freq) && max_sample_freq < attr.sample_freq) {
      PLOG(INFO) << "Adjust sample freq to max allowed sample freq " << max_sample_freq;
      real_attr.sample_freq = max_sample_freq;
    }
  }
  int perf_event_fd = perf_event_open(real_attr, tid, cpu, group_fd, 0);
  if (perf_event_fd == -1) {
    if (report_error) {
      PLOG(ERROR) << "open perf_event_file (event " << event_name << ", tid "
                  << tid << ", cpu " << cpu << ", group_fd " << group_fd
                  << ") failed";
    } else {
      PLOG(DEBUG) << "open perf_event_file (event " << event_name << ", tid "
                  << tid << ", cpu " << cpu << ", group_fd " << group_fd
                  << ") failed";
    }
    return nullptr;
  }
  if (fcntl(perf_event_fd, F_SETFD, FD_CLOEXEC) == -1) {
    if (report_error) {
      PLOG(ERROR) << "fcntl(FD_CLOEXEC) for perf_event_file (event "
                  << event_name << ", tid " << tid << ", cpu " << cpu
                  << ", group_fd " << group_fd << ") failed";
    } else {
      PLOG(DEBUG) << "fcntl(FD_CLOEXEC) for perf_event_file (event "
                  << event_name << ", tid " << tid << ", cpu " << cpu
                  << ", group_fd " << group_fd << ") failed";
    }
    // Close the fd so it isn't leaked on this error path.
    close(perf_event_fd);
    return nullptr;
  }
  return std::unique_ptr<EventFd>(
      new EventFd(real_attr, perf_event_fd, event_name, tid, cpu));
}

EventFd::~EventFd() {
  DestroyMappedBuffer();
  close(perf_event_fd_);
}

std::string EventFd::Name() const {
  return android::base::StringPrintf(
      "perf_event_file(event %s, tid %d, cpu %d)", event_name_.c_str(), tid_,
      cpu_);
}

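// The event id is assumed to come back together with the counter value
// (PERF_FORMAT_ID in attr.read_format, configured elsewhere); it is cached
// after the first successful read.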
uint64_t EventFd::Id() const {
  if (id_ == 0) {
    PerfCounter counter;
    if (InnerReadCounter(&counter)) {
      id_ = counter.id;
    }
  }
  return id_;
}

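// PERF_EVENT_IOC_ENABLE tells the kernel to start counting/sampling this event.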
bool EventFd::EnableEvent() {
  int result = ioctl(perf_event_fd_, PERF_EVENT_IOC_ENABLE, 0);
  if (result < 0) {
    PLOG(ERROR) << "ioctl(enable) " << Name() << " failed";
    return false;
  }
  return true;
}

bool EventFd::InnerReadCounter(PerfCounter* counter) const {
  CHECK(counter != nullptr);
  if (!android::base::ReadFully(perf_event_fd_, counter, sizeof(*counter))) {
    PLOG(ERROR) << "ReadCounter from " << Name() << " failed";
    return false;
  }
  return true;
}

bool EventFd::ReadCounter(PerfCounter* counter) {
  if (!InnerReadCounter(counter)) {
    return false;
  }
  // The counter delta is always reported to systrace when tracing is enabled.
  if (tid_ > 0) {
    ATRACE_INT64(android::base::StringPrintf(
                   "%s_tid%d_cpu%d", event_name_.c_str(), tid_,
                   cpu_).c_str(), counter->value - last_counter_value_);
  } else {
    ATRACE_INT64(android::base::StringPrintf(
                   "%s_cpu%d", event_name_.c_str(),
                   cpu_).c_str(), counter->value - last_counter_value_);
  }
  last_counter_value_ = counter->value;
  return true;
}

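// Map the event's ring buffer: one metadata page (perf_event_mmap_page)
// followed by mmap_pages data pages, which is why mmap_pages must be a power
// of two.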
bool EventFd::CreateMappedBuffer(size_t mmap_pages, bool report_error) {
  CHECK(IsPowerOfTwo(mmap_pages));
  size_t page_size = sysconf(_SC_PAGE_SIZE);
  size_t mmap_len = (mmap_pages + 1) * page_size;
  void* mmap_addr = mmap(nullptr, mmap_len, PROT_READ | PROT_WRITE, MAP_SHARED,
                         perf_event_fd_, 0);
  if (mmap_addr == MAP_FAILED) {
    bool is_perm_error = (errno == EPERM);
    if (report_error) {
      PLOG(ERROR) << "mmap(" << mmap_pages << ") failed for " << Name();
    } else {
      PLOG(DEBUG) << "mmap(" << mmap_pages << ") failed for " << Name();
    }
    if (report_error && is_perm_error) {
      LOG(ERROR)
          << "It seems the kernel doesn't allow allocating enough "
          << "buffer for dumping samples, consider decreasing mmap pages(-m).";
    }
    return false;
  }
  mmap_addr_ = mmap_addr;
  mmap_len_ = mmap_len;
  mmap_metadata_page_ = reinterpret_cast<perf_event_mmap_page*>(mmap_addr_);
  mmap_data_buffer_ = reinterpret_cast<char*>(mmap_addr_) + page_size;
  mmap_data_buffer_size_ = mmap_len_ - page_size;
  return true;
}

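// PERF_EVENT_IOC_SET_OUTPUT redirects this event's output into event_fd's
// ring buffer, so several events can share one mapped buffer instead of each
// mapping their own.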
bool EventFd::ShareMappedBuffer(const EventFd& event_fd, bool report_error) {
  CHECK(!HasMappedBuffer());
  CHECK(event_fd.HasMappedBuffer());
  int result =
      ioctl(perf_event_fd_, PERF_EVENT_IOC_SET_OUTPUT, event_fd.perf_event_fd_);
  if (result != 0) {
    if (report_error) {
      PLOG(ERROR) << "failed to share mapped buffer of "
                  << event_fd.perf_event_fd_ << " with " << perf_event_fd_;
    }
    return false;
  }
  return true;
}

void EventFd::DestroyMappedBuffer() {
  if (HasMappedBuffer()) {
    munmap(mmap_addr_, mmap_len_);
    mmap_addr_ = nullptr;
    mmap_len_ = 0;
    mmap_metadata_page_ = nullptr;
    mmap_data_buffer_ = nullptr;
    mmap_data_buffer_size_ = 0;
  }
}

size_t EventFd::GetAvailableMmapData(std::vector<char>& buffer, size_t& buffer_pos) {
  if (!HasMappedBuffer()) {
    return 0;
  }
  // The mmap_data_buffer is used as a ring buffer between the kernel and
  // simpleperf. The kernel continuously writes records to the buffer, and
  // simpleperf continuously reads records out.
  //         _________________________________________
  // buffer | can write   |   can read   |  can write |
  //                      ^              ^
  //                    read_head       write_head
  //
  // So simpleperf can read records in [read_head, write_head), and the kernel
  // can write records in [write_head, read_head). The kernel is responsible
  // for updating write_head, and simpleperf is responsible for updating
  // read_head.

  size_t buf_mask = mmap_data_buffer_size_ - 1;
  size_t write_head =
      static_cast<size_t>(mmap_metadata_page_->data_head & buf_mask);
  size_t read_head =
      static_cast<size_t>(mmap_metadata_page_->data_tail & buf_mask);

  if (read_head == write_head) {
    // No available data.
    return 0;
  }
  size_t read_bytes;
  if (read_head < write_head) {
    read_bytes = write_head - read_head;
  } else {
    read_bytes = mmap_data_buffer_size_ - read_head + write_head;
  }
  // Extend the buffer if it is not big enough.
  if (buffer.size() < buffer_pos + read_bytes) {
    buffer.resize(buffer_pos + read_bytes);
  }

  // rmb() is used to ensure the data is read only after data_head has been read.
  __sync_synchronize();

  // Copy records from the mapped buffer. Note that records can wrap around at
  // the end of the mapped buffer.
  char* to = &buffer[buffer_pos];
  if (read_head < write_head) {
    char* from = mmap_data_buffer_ + read_head;
    size_t n = write_head - read_head;
    memcpy(to, from, n);
  } else {
    char* from = mmap_data_buffer_ + read_head;
    size_t n = mmap_data_buffer_size_ - read_head;
    memcpy(to, from, n);
    to += n;
    from = mmap_data_buffer_;
    n = write_head;
    memcpy(to, from, n);
  }
  buffer_pos += read_bytes;
  DiscardMmapData(read_bytes);
  return read_bytes;
}

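// Advancing data_tail tells the kernel that the consumed bytes may be
// overwritten with new records.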
void EventFd::DiscardMmapData(size_t discard_size) {
  // mb() is used to ensure we finish reading the data before writing data_tail.
  __sync_synchronize();
  mmap_metadata_page_->data_tail += discard_size;
}

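// Register the perf event fd with the event loop; `callback` runs whenever the
// fd becomes readable, i.e. when the kernel signals that new data is available
// in the ring buffer.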
bool EventFd::StartPolling(IOEventLoop& loop,
                           const std::function<bool()>& callback) {
  ioevent_ref_ = loop.AddReadEvent(perf_event_fd_, callback);
  return ioevent_ref_ != nullptr;
}

bool EventFd::StopPolling() { return IOEventLoop::DelEvent(ioevent_ref_); }

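// Probe kernel support for `attr` by trying to open it on the current process
// across all cpus (cpu == -1); the fd is released as soon as event_fd goes out
// of scope.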
bool IsEventAttrSupported(const perf_event_attr& attr) {
  if (attr.type == SIMPLEPERF_TYPE_USER_SPACE_SAMPLERS &&
      attr.config == SIMPLEPERF_CONFIG_INPLACE_SAMPLER) {
    // User space samplers don't need kernel support.
    return true;
  }
  std::unique_ptr<EventFd> event_fd = EventFd::OpenEventFile(attr, getpid(), -1, nullptr, false);
  return event_fd != nullptr;
}
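
// A minimal usage sketch (hypothetical caller, not part of this file): open a
// cpu-cycles counter on the current process, map a 16-page ring buffer and
// enable it. Only the API above and the perf_event ABI constants are assumed.
//
//   perf_event_attr attr;
//   memset(&attr, 0, sizeof(attr));
//   attr.size = sizeof(attr);
//   attr.type = PERF_TYPE_HARDWARE;
//   attr.config = PERF_COUNT_HW_CPU_CYCLES;
//   std::unique_ptr<EventFd> event_fd =
//       EventFd::OpenEventFile(attr, getpid(), -1, nullptr, true);
//   if (event_fd != nullptr && event_fd->CreateMappedBuffer(16, true) &&
//       event_fd->EnableEvent()) {
//     // ... ReadCounter()/GetAvailableMmapData() as needed ...
//   }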