/*
 * CPUDistribution Show load distribution across CPU cores during a period of
 *                 time. For Linux, uses BCC, eBPF. Embedded C.
 *
 * Basic example of BCC and kprobes.
 *
 * USAGE: CPUDistribution [duration]
 *
 * Copyright (c) Facebook, Inc.
 * Licensed under the Apache License, Version 2.0 (the "License")
 */
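
/*
 * Example: probe for 5 seconds (the duration defaults to 10 when no argument
 * is given); loading BPF programs typically requires root:
 *   sudo ./CPUDistribution 5
 */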

#include <unistd.h>
#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <string>

#include "BPF.h"

const std::string BPF_PROGRAM = R"(
#include <linux/sched.h>
#include <uapi/linux/ptrace.h>

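// pid_to_cpu: CPU a task was last switched in on.
// pid_to_ts:  timestamp (ns) when the task was switched in.
// cpu_time:   accumulated on-CPU time per core, in nanoseconds.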
BPF_HASH(pid_to_cpu, pid_t, int);
BPF_HASH(pid_to_ts, pid_t, uint64_t);
BPF_HASH(cpu_time, int, uint64_t);

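// Attached as a kprobe on finish_task_switch(), which runs at the end of every
// context switch with `prev` pointing at the task being switched out. The
// handler credits the time `prev` spent on-CPU to the core it ran on, then
// records where and when the incoming task starts running.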
int task_switch_event(struct pt_regs *ctx, struct task_struct *prev) {
  // Look up when and where the outgoing task was last switched in.
  pid_t prev_pid = prev->pid;
  int* prev_cpu = pid_to_cpu.lookup(&prev_pid);
  uint64_t* prev_ts = pid_to_ts.lookup(&prev_pid);

  // The current task is the one that has just been switched in.
  pid_t cur_pid = bpf_get_current_pid_tgid();
  int cur_cpu = bpf_get_smp_processor_id();
  uint64_t cur_ts = bpf_ktime_get_ns();

  uint64_t this_cpu_time = 0;
  if (prev_ts) {
    pid_to_ts.delete(&prev_pid);
    this_cpu_time = (cur_ts - *prev_ts);
  }
  if (prev_cpu) {
    pid_to_cpu.delete(&prev_pid);
    if (this_cpu_time > 0) {
      // Add this slice to the running total for the CPU the task ran on.
      int cpu_key = *prev_cpu;
      uint64_t* history_time = cpu_time.lookup(&cpu_key);
      if (history_time)
        this_cpu_time += *history_time;
      cpu_time.update(&cpu_key, &this_cpu_time);
    }
  }

  // Record where and when the incoming task starts running.
  pid_to_cpu.update(&cur_pid, &cur_cpu);
  pid_to_ts.update(&cur_pid, &cur_ts);

  return 0;
}
)";

int main(int argc, char** argv) {
  ebpf::BPF bpf;
  // Compile the embedded C source into a BPF program.
  auto init_res = bpf.init(BPF_PROGRAM);
  if (init_res.code() != 0) {
    std::cerr << init_res.msg() << std::endl;
    return 1;
  }

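  // Attach task_switch_event as a kprobe on the kernel's finish_task_switch(),
  // so it fires on every context switch.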
  auto attach_res =
      bpf.attach_kprobe("finish_task_switch", "task_switch_event");
  if (attach_res.code() != 0) {
    std::cerr << attach_res.msg() << std::endl;
    return 1;
  }

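  // Sample for the requested number of seconds (default 10) while the probe
  // accumulates per-CPU on-CPU time in the cpu_time map.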
  int probe_time = 10;
  if (argc == 2) {
    probe_time = atoi(argv[1]);
  }
  std::cout << "Probing for " << probe_time << " seconds" << std::endl;
  sleep(probe_time);

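  // Read the per-CPU totals back from the kernel map and print them,
  // converting nanoseconds to milliseconds.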
  auto table = bpf.get_hash_table<int, uint64_t>("cpu_time");
  auto num_cores = sysconf(_SC_NPROCESSORS_ONLN);
  for (int i = 0; i < num_cores; i++) {
    std::cout << "CPU " << std::setw(2) << i << " worked for ";
    std::cout << (table[i] / 1000000.0) << " ms." << std::endl;
  }

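  // Detach the kprobe before exiting.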
  auto detach_res = bpf.detach_kprobe("finish_task_switch");
  if (detach_res.code() != 0) {
    std::cerr << detach_res.msg() << std::endl;
    return 1;
  }

  return 0;
}