#ifndef _PERF_PERF_H
#define _PERF_PERF_H

#include <asm/unistd.h>

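/*
 * Per-architecture helpers: rmb() is the read barrier used when consuming
 * the mmap()ed event ring buffer, cpu_relax() is a busy-wait hint, and
 * CPUINFO_PROC names the /proc/cpuinfo field perf reads to describe the CPU.
 */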
#if defined(__i386__)
#define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC	"model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
#endif

#if defined(__x86_64__)
#define rmb()		asm volatile("lfence" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC	"model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/uapi/asm/unistd.h"
#define rmb()		asm volatile ("sync" ::: "memory")
#define cpu_relax()	asm volatile ("" ::: "memory")
#define CPUINFO_PROC	"cpu"
#endif

#ifdef __s390__
#define rmb()		asm volatile("bcr 15,0" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __sh__
#if defined(__SH4A__) || defined(__SH5__)
# define rmb()		asm volatile("synco" ::: "memory")
#else
# define rmb()		asm volatile("" ::: "memory")
#endif
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu type"
#endif

#ifdef __hppa__
#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu"
#endif

#ifdef __sparc__
#define rmb()		asm volatile("":::"memory")
#define cpu_relax()	asm volatile("":::"memory")
#define CPUINFO_PROC	"cpu"
#endif

#ifdef __alpha__
#define rmb()		asm volatile("mb" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu model"
#endif

#ifdef __ia64__
#define rmb()		asm volatile ("mf" ::: "memory")
#define cpu_relax()	asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC	"model name"
#endif

#ifdef __arm__
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define rmb()		((void(*)(void))0xffff0fa0)()
#define cpu_relax()	asm volatile("":::"memory")
#define CPUINFO_PROC	"Processor"
#endif

#ifdef __aarch64__
#define rmb()		asm volatile("dmb ld" ::: "memory")
#define cpu_relax()	asm volatile("yield" ::: "memory")
#endif

#ifdef __mips__
#define rmb()		asm volatile(					\
				".set	mips2\n\t"			\
				"sync\n\t"				\
				".set	mips0"				\
				: /* no output */			\
				: /* no input */			\
				: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu model"
#endif

#ifdef __arc__
#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	rmb()
#define CPUINFO_PROC	"Processor"
#endif

#ifdef __metag__
#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"CPU"
#endif

#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <linux/perf_event.h>
#include "util/types.h"
#include <stdbool.h>

/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE   31
#define PR_TASK_PERF_EVENTS_ENABLE    32
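
/*
 * Illustrative sketch (not used by the perf tools themselves): a task can
 * cheaply gate its own counters around a region it does not want counted:
 *
 *	prctl(PR_TASK_PERF_EVENTS_DISABLE);
 *	setup_phase_we_do_not_want_counted();	// placeholder workload
 *	prctl(PR_TASK_PERF_EVENTS_ENABLE);
 */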

#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC			1000000000ULL
#endif
#ifndef NSEC_PER_USEC
# define NSEC_PER_USEC			1000ULL
#endif

static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}
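
/*
 * Illustrative use of rdclock(): time a region in nanoseconds, e.g.
 *
 *	unsigned long long t0 = rdclock();
 *	do_work();				// placeholder workload
 *	printf("%llu ns\n", rdclock() - t0);
 */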

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

#define unlikely(x)	__builtin_expect(!!(x), 0)
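/*
 * Type-safe min(): the discarded (void) comparison of &_min1 and &_min2
 * makes the compiler warn when x and y have incompatible types, without
 * affecting the result.
 */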
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })

extern bool test_attr__enabled;
void test_attr__init(void);
void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
		     int fd, int group_fd, unsigned long flags);

static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		      pid_t pid, int cpu, int group_fd,
		      unsigned long flags)
{
	int fd;

	fd = syscall(__NR_perf_event_open, attr, pid, cpu,
		     group_fd, flags);

	if (unlikely(test_attr__enabled))
		test_attr__open(attr, pid, cpu, fd, group_fd, flags);

	return fd;
}
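
/*
 * Minimal usage sketch for sys_perf_event_open() (illustrative only, not
 * part of the perf tools): count instructions retired by the calling
 * thread on any CPU.
 *
 *	struct perf_event_attr attr;
 *	long long count;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type = PERF_TYPE_HARDWARE;
 *	attr.size = sizeof(attr);
 *	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.disabled = 1;
 *	attr.exclude_kernel = 1;
 *
 *	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);	// pid 0 = self, cpu -1 = any
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	workload();					// placeholder workload
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *	close(fd);
 */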

#define MAX_COUNTERS			256
#define MAX_NR_CPUS			256

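/*
 * Sample payloads with a variable-length tail: nr gives the number of
 * entries that follow the header (ips[] for PERF_SAMPLE_CALLCHAIN,
 * entries[] in struct branch_stack for PERF_SAMPLE_BRANCH_STACK).
 */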
struct ip_callchain {
	u64 nr;
	u64 ips[0];
};

struct branch_flags {
	u64 mispred:1;
	u64 predicted:1;
	u64 reserved:62;
};

struct branch_entry {
	u64				from;
	u64				to;
	struct branch_flags flags;
};

struct branch_stack {
	u64				nr;
	struct branch_entry	entries[0];
};

extern const char *input_name;
extern bool perf_host, perf_guest;
extern const char perf_version_string[];

void pthread__unblock_sigwinch(void);

#include "util/target.h"

enum perf_call_graph_mode {
	CALLCHAIN_NONE,
	CALLCHAIN_FP,
	CALLCHAIN_DWARF
};

struct perf_record_opts {
	struct perf_target target;
	int	     call_graph;
	bool	     group;
	bool	     inherit_stat;
	bool	     no_delay;
	bool	     no_inherit;
	bool	     no_samples;
	bool	     pipe_output;
	bool	     raw_samples;
	bool	     sample_address;
	bool	     sample_weight;
	bool	     sample_time;
	bool	     period;
	unsigned int freq;
	unsigned int mmap_pages;
	unsigned int user_freq;
	u64          branch_stack;
	u64	     default_interval;
	u64	     user_interval;
	u16	     stack_dump_size;
};

#endif