/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
        int             (*is_in_guest)(void);
        int             (*is_user_mode)(void);
        unsigned long   (*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <asm/local.h>

struct perf_callchain_entry {
        __u64                           nr;
        __u64                           ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
        u32                             size;
        void                            *data;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample;
 * branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
        __u64                           nr;
        struct perf_branch_entry        entries[0];
};
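
/*
 * Illustrative sketch (usage assumed from the layout described above, not a
 * new interface): a consumer walks the entries from most recent to least
 * recent, using the from/to fields of struct perf_branch_entry from the
 * uapi header:
 *
 *	u64 i;
 *
 *	for (i = 0; i < br_stack->nr; i++) {
 *		u64 from = br_stack->entries[i].from;
 *		u64 to   = br_stack->entries[i].to;
 *		// entries[0] is the most recently taken branch
 *	}
 */
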
struct perf_regs_user {
        __u64           abi;
        struct pt_regs  *regs;
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
        u64             config;        /* register value */
        unsigned int    reg;           /* register address or index */
        int             alloc;         /* extra register already allocated */
        int             idx;           /* index in shared_regs->regs[] */
};

struct event_constraint;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
        union {
                struct { /* hardware */
                        u64             config;
                        u64             last_tag;
                        unsigned long   config_base;
                        unsigned long   event_base;
                        int             event_base_rdpmc;
                        int             idx;
                        int             last_cpu;
                        int             flags;

                        struct hw_perf_event_extra extra_reg;
                        struct hw_perf_event_extra branch_reg;

                        struct event_constraint *constraint;
                };
                struct { /* software */
                        struct hrtimer  hrtimer;
                };
                struct { /* tracepoint */
                        struct task_struct      *tp_target;
                        /* for tp_event->class */
                        struct list_head        tp_list;
                };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
                struct { /* breakpoint */
                        /*
                         * Crufty hack to avoid the chicken and egg
                         * problem hw_breakpoint has with context
                         * creation and event initialization.
                         */
                        struct task_struct              *bp_target;
                        struct arch_hw_breakpoint       info;
                        struct list_head                bp_list;
                };
#endif
        };
        int                             state;
        local64_t                       prev_count;
        u64                             sample_period;
        u64                             last_period;
        local64_t                       period_left;
        u64                             interrupts_seq;
        u64                             interrupts;

        u64                             freq_time_stamp;
        u64                             freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED        0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE       0x02 /* event->count up-to-date */
#define PERF_HES_ARCH           0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
        struct list_head                entry;

        struct device                   *dev;
        const struct attribute_group    **attr_groups;
        const char                      *name;
        int                             type;

        int * __percpu                  pmu_disable_count;
        struct perf_cpu_context * __percpu pmu_cpu_context;
        int                             task_ctx_nr;
        int                             hrtimer_interval_ms;

        /*
         * Fully disable/enable this PMU, can be used to protect from the PMI
         * as well as for lazy/batch writing of the MSRs.
         */
        void (*pmu_enable)              (struct pmu *pmu); /* optional */
        void (*pmu_disable)             (struct pmu *pmu); /* optional */

        /*
         * Try and initialize the event for this PMU.
         * Should return -ENOENT when the @event doesn't match this PMU.
         */
        int (*event_init)               (struct perf_event *event);

#define PERF_EF_START   0x01    /* start the counter when adding    */
#define PERF_EF_RELOAD  0x02    /* reload the counter when starting */
#define PERF_EF_UPDATE  0x04    /* update the counter when stopping */

        /*
         * Adds/Removes a counter to/from the PMU, can be done inside
         * a transaction, see the ->*_txn() methods.
         */
        int  (*add)                     (struct perf_event *event, int flags);
        void (*del)                     (struct perf_event *event, int flags);

        /*
         * Starts/Stops a counter present on the PMU. The PMI handler
         * should stop the counter when perf_event_overflow() returns
         * !0. ->start() will be used to continue.
         */
        void (*start)                   (struct perf_event *event, int flags);
        void (*stop)                    (struct perf_event *event, int flags);

        /*
         * Updates the counter value of the event.
         */
        void (*read)                    (struct perf_event *event);

        /*
         * Group events scheduling is treated as a transaction: add
         * group events as a whole and perform one schedulability test.
         * If the test fails, roll back the whole group.
         *
         * Start the transaction, after this ->add() doesn't need to
         * do schedulability tests.
         */
        void (*start_txn)               (struct pmu *pmu); /* optional */
        /*
         * If ->start_txn() disabled the ->add() schedulability test
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
         */
        int  (*commit_txn)              (struct pmu *pmu); /* optional */
        /*
         * Will cancel the transaction, assumes ->del() is called
         * for each successful ->add() during the transaction.
         */
        void (*cancel_txn)              (struct pmu *pmu); /* optional */

        /*
         * Will return the value for perf_event_mmap_page::index for this event;
         * if no implementation is provided it will default to: event->hw.idx + 1.
         */
        int (*event_idx)                (struct perf_event *event); /* optional */

        /*
         * flush branch stack on context-switches (needed in cpu-wide mode)
         */
        void (*flush_branch_stack)      (void);
};
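
/*
 * Illustrative sketch of how a caller might drive the transaction methods
 * when scheduling an event group onto a PMU; this mirrors what the generic
 * code does (an assumption for illustration, not the actual implementation):
 *
 *	pmu->start_txn(pmu);
 *
 *	if (pmu->add(leader, PERF_EF_START))
 *		goto error;
 *	list_for_each_entry(event, &leader->sibling_list, group_entry) {
 *		if (pmu->add(event, PERF_EF_START))
 *			goto error;
 *	}
 *
 *	if (!pmu->commit_txn(pmu))
 *		return 0;	// the whole group is now scheduled in
 * error:
 *	// ->del() every event that was successfully ->add()ed
 *	pmu->cancel_txn(pmu);
 */
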
/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
        PERF_EVENT_STATE_ERROR          = -2,
        PERF_EVENT_STATE_OFF            = -1,
        PERF_EVENT_STATE_INACTIVE       =  0,
        PERF_EVENT_STATE_ACTIVE         =  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
                                        struct perf_sample_data *,
                                        struct pt_regs *regs);

enum perf_group_flag {
        PERF_GROUP_SOFTWARE             = 0x1,
};

#define SWEVENT_HLIST_BITS              8
#define SWEVENT_HLIST_SIZE              (1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
        struct hlist_head               heads[SWEVENT_HLIST_SIZE];
        struct rcu_head                 rcu_head;
};

#define PERF_ATTACH_CONTEXT     0x01
#define PERF_ATTACH_GROUP       0x02
#define PERF_ATTACH_TASK        0x04

struct perf_cgroup;
struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
        /*
         * entry onto perf_event_context::event_list;
         *   modifications require ctx->lock
         *   RCU safe iterations.
         */
        struct list_head                event_entry;

        /*
         * XXX: group_entry and sibling_list should be mutually exclusive;
         * either you're a sibling in a group, or you're the group leader.
         * Rework the code to always use the same list element.
         *
         * Locked for modification by both ctx->mutex and ctx->lock; holding
         * either suffices for read.
         */
        struct list_head                group_entry;
        struct list_head                sibling_list;

        /*
         * We need storage to track the entries in perf_pmu_migrate_context; we
         * cannot use the event_entry because of RCU and we want to keep the
         * group intact, which avoids us using the other two entries.
         */
        struct list_head                migrate_entry;

        struct hlist_node               hlist_entry;
        int                             nr_siblings;
        int                             group_flags;
        struct perf_event               *group_leader;
        struct pmu                      *pmu;

        enum perf_event_active_state    state;
        unsigned int                    attach_state;
        local64_t                       count;
        atomic64_t                      child_count;

        /*
         * These are the total time in nanoseconds that the event
         * has been enabled (i.e. eligible to run, and the task has
         * been scheduled in, if this is a per-task event)
         * and running (scheduled onto the CPU), respectively.
         *
         * They are computed from tstamp_enabled, tstamp_running and
         * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
         */
        u64                             total_time_enabled;
        u64                             total_time_running;

        /*
         * These are timestamps used for computing total_time_enabled
         * and total_time_running when the event is in INACTIVE or
         * ACTIVE state, measured in nanoseconds from an arbitrary point
         * in time.
         * tstamp_enabled: the notional time when the event was enabled
         * tstamp_running: the notional time when the event was scheduled on
         * tstamp_stopped: in INACTIVE state, the notional time when the
         *      event was scheduled off.
         */
        u64                             tstamp_enabled;
        u64                             tstamp_running;
        u64                             tstamp_stopped;

        /*
         * timestamp shadows the actual context timing but it can
         * be safely used in NMI interrupt context. It reflects the
         * context time as it was when the event was last scheduled in.
         *
         * ctx_time already accounts for ctx->timestamp. Therefore to
         * compute ctx_time for a sample, simply add perf_clock().
         */
        u64                             shadow_ctx_time;

        struct perf_event_attr          attr;
        u16                             header_size;
        u16                             id_header_size;
        u16                             read_size;
        struct hw_perf_event            hw;

        struct perf_event_context      *ctx;
        atomic_long_t                   refcount;

        /*
         * These accumulate total time (in nanoseconds) that children
         * events have been enabled and running, respectively.
         */
        atomic64_t                      child_total_time_enabled;
        atomic64_t                      child_total_time_running;

        /*
         * Protect attach/detach and child_list:
         */
        struct mutex                    child_mutex;
        struct list_head                child_list;
        struct perf_event               *parent;

        int                             oncpu;
        int                             cpu;

        struct list_head                owner_entry;
        struct task_struct              *owner;

        /* mmap bits */
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;

        struct ring_buffer              *rb;
        struct list_head                rb_entry;

        /* poll related */
        wait_queue_head_t               waitq;
        struct fasync_struct            *fasync;

        /* delayed work for NMIs and such */
        int                             pending_wakeup;
        int                             pending_kill;
        int                             pending_disable;
        struct irq_work                 pending;

        atomic_t                        event_limit;

        void (*destroy)(struct perf_event *);
        struct rcu_head                 rcu_head;

        struct pid_namespace            *ns;
        u64                             id;

        perf_overflow_handler_t         overflow_handler;
        void                            *overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
        struct ftrace_event_call        *tp_event;
        struct event_filter             *filter;
#ifdef CONFIG_FUNCTION_TRACER
        struct ftrace_ops               ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
        struct perf_cgroup              *cgrp; /* cgroup this event is attached to */
        int                             cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
        task_context,
        cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
        struct pmu                      *pmu;
        enum perf_event_context_type    type;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
         */
        raw_spinlock_t                  lock;
        /*
         * Protect the list of events. Locking either mutex or lock
         * is sufficient to ensure the list doesn't change; to change
         * the list you need to lock both the mutex and the spinlock.
         */
        struct mutex                    mutex;

        struct list_head                pinned_groups;
        struct list_head                flexible_groups;
        struct list_head                event_list;
        int                             nr_events;
        int                             nr_active;
        int                             is_active;
        int                             nr_stat;
        int                             nr_freq;
        int                             rotate_disable;
        atomic_t                        refcount;
        struct task_struct              *task;

        /*
         * Context clock, runs when context enabled.
         */
        u64                             time;
        u64                             timestamp;

        /*
         * These fields let us detect when two contexts have both
         * been cloned (inherited) from a common ancestor.
         */
        struct perf_event_context       *parent_ctx;
        u64                             parent_gen;
        u64                             generation;
        int                             pin_count;
        int                             nr_cgroups;      /* cgroup events */
        int                             nr_branch_stack; /* branch_stack events */
        struct rcu_head                 rcu_head;
};
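
/*
 * Illustrative sketch of the locking rule documented above (usage pattern
 * only, not a new interface): holding either ctx->mutex or ctx->lock is
 * enough to walk the event list, both are needed to modify it:
 *
 *	// reader
 *	raw_spin_lock_irqsave(&ctx->lock, flags);
 *	list_for_each_entry(event, &ctx->event_list, event_entry)
 *		...;
 *	raw_spin_unlock_irqrestore(&ctx->lock, flags);
 *
 *	// writer
 *	mutex_lock(&ctx->mutex);
 *	raw_spin_lock_irq(&ctx->lock);
 *	list_add_rcu(&event->event_entry, &ctx->event_list);
 *	raw_spin_unlock_irq(&ctx->lock);
 *	mutex_unlock(&ctx->mutex);
 */
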
/*
 * Number of contexts where an event can trigger:
 *      task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS        4

/**
 * struct perf_cpu_context - per CPU event context structure
 */
struct perf_cpu_context {
        struct perf_event_context       ctx;
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
        int                             exclusive;
        struct hrtimer                  hrtimer;
        ktime_t                         hrtimer_interval;
        struct list_head                rotation_list;
        struct pmu                      *unique_pmu;
        struct perf_cgroup              *cgrp;
};

struct perf_output_handle {
        struct perf_event               *event;
        struct ring_buffer              *rb;
        unsigned long                   wakeup;
        unsigned long                   size;
        void                            *addr;
        int                             page;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
                                       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
                                        struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                 int cpu,
                                 struct task_struct *task,
                                 perf_overflow_handler_t callback,
                                 void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
                                     int src_cpu, int dst_cpu);
extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);
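
/*
 * Illustrative sketch of the in-kernel counter API declared above
 * (hypothetical driver code; my_overflow_handler and the attr values are
 * made up for illustration):
 *
 *	static void my_overflow_handler(struct perf_event *event,
 *					struct perf_sample_data *data,
 *					struct pt_regs *regs)
 *	{
 *		// runs from IRQ/NMI context each time the period expires
 *	}
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.sample_period	= 1000000,
 *		.pinned		= 1,
 *	};
 *	struct perf_event *event;
 *	u64 enabled, running, count;
 *
 *	event = perf_event_create_kernel_counter(&attr, 0, NULL,
 *						 my_overflow_handler, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	...
 *	count = perf_event_read_value(event, &enabled, &running);
 *	perf_event_release_kernel(event);
 *
 * Passing task == NULL binds the counter to the given CPU; a non-NULL task
 * creates a per-task counter instead.
 */
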
struct perf_sample_data {
        u64                             type;

        u64                             ip;
        struct {
                u32     pid;
                u32     tid;
        }                               tid_entry;
        u64                             time;
        u64                             addr;
        u64                             id;
        u64                             stream_id;
        struct {
                u32     cpu;
                u32     reserved;
        }                               cpu_entry;
        u64                             period;
        union  perf_mem_data_src        data_src;
        struct perf_callchain_entry     *callchain;
        struct perf_raw_record          *raw;
        struct perf_branch_stack        *br_stack;
        struct perf_regs_user           regs_user;
        u64                             stack_user_size;
        u64                             weight;
};

static inline void perf_sample_data_init(struct perf_sample_data *data,
                                         u64 addr, u64 period)
{
        /* remaining struct members initialized in perf_prepare_sample() */
        data->addr = addr;
        data->raw  = NULL;
        data->br_stack = NULL;
        data->period = period;
        data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE;
        data->regs_user.regs = NULL;
        data->stack_user_size = 0;
        data->weight = 0;
        data->data_src.val = 0;
}

extern void perf_output_sample(struct perf_output_handle *handle,
                               struct perf_event_header *header,
                               struct perf_sample_data *data,
                               struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
                                struct perf_sample_data *data,
                                struct perf_event *event,
                                struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
                               struct perf_sample_data *data,
                               struct pt_regs *regs);

static inline bool is_sampling_event(struct perf_event *event)
{
        return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
        return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(*regs));

        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
        struct pt_regs hot_regs;

        if (static_key_false(&perf_swevent_enabled[event_id])) {
                if (!regs) {
                        perf_fetch_caller_regs(&hot_regs);
                        regs = &hot_regs;
                }
                __perf_sw_event(event_id, nr, regs, addr);
        }
}
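
/*
 * Example: this is how callers in hot kernel paths account a software event;
 * the architecture page-fault handlers, for instance, do:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * Thanks to the static key above, the call is patched out entirely until at
 * least one such software event has been created.
 */
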
extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
                                            struct task_struct *task)
{
        if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
                                             struct task_struct *next)
{
        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);

        if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
        if (entry->nr < PERF_MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

extern int perf_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos);
extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
        return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
        return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
        return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
                          int entry_size, struct pt_regs *regs,
                          struct hlist_head *head, int rctx,
                          struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
                (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
        return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
                                     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
                                     unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *prev,
                         struct task_struct *task)                      { }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
                          struct task_struct *next)                     { }
static inline int perf_event_init_task(struct task_struct *child)      { return 0; }
static inline void perf_event_exit_task(struct task_struct *child)     { }
static inline void perf_event_free_task(struct task_struct *task)      { }
static inline void perf_event_delayed_put(struct task_struct *task)    { }
static inline void perf_event_print_debug(void)                        { }
static inline int perf_event_task_disable(void)                        { return -EINVAL; }
static inline int perf_event_task_enable(void)                         { return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
        return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)    { }
static inline void
perf_bp_event(struct perf_event *event, void *data)                    { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)                          { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)                          { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)         { }
static inline void perf_event_comm(struct task_struct *tsk)            { }
static inline void perf_event_fork(struct task_struct *tsk)            { }
static inline void perf_event_init(void)                               { }
static inline int perf_swevent_get_recursion_context(void)             { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)        { }
static inline u64 perf_swevent_set_period(struct perf_event *event)    { return 0; }
static inline void perf_event_enable(struct perf_event *event)         { }
static inline void perf_event_disable(struct perf_event *event)        { }
static inline int __perf_event_disable(void *info)                     { return -1; }
static inline void perf_event_task_tick(void)                          { }
#endif
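
/*
 * Illustrative sketch of the output-handle API declared above (usage pattern
 * assumed from the declarations, not a new interface): emitting a record into
 * the ring buffer is a begin/put/end sequence:
 *
 *	struct perf_output_handle handle;
 *	struct perf_event_header header;
 *
 *	// header.type, header.misc and header.size describe the record;
 *	// header.size must cover the header plus all payload written below.
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;				// no room in the ring buffer
 *	perf_output_put(&handle, header);	// wrapper defined further down
 *	perf_output_end(&handle);
 */
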
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
extern bool perf_event_can_stop_tick(void);
#else
static inline bool perf_event_can_stop_tick(void)                      { return true; }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)                      { }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched/core.c.
 */
#define perf_cpu_notifier(fn)                                           \
do {                                                                    \
        static struct notifier_block fn##_nb =                          \
                { .notifier_call = fn, .priority = CPU_PRI_PERF };      \
        unsigned long cpu = smp_processor_id();                         \
        unsigned long flags;                                            \
        fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,                     \
                (void *)(unsigned long)cpu);                            \
        local_irq_save(flags);                                          \
        fn(&fn##_nb, (unsigned long)CPU_STARTING,                       \
                (void *)(unsigned long)cpu);                            \
        local_irq_restore(flags);                                       \
        fn(&fn##_nb, (unsigned long)CPU_ONLINE,                         \
                (void *)(unsigned long)cpu);                            \
        register_cpu_notifier(&fn##_nb);                                \
} while (0)


struct perf_pmu_events_attr {
        struct device_attribute attr;
        u64 id;
        const char *event_str;
};

#define PMU_EVENT_ATTR(_name, _var, _id, _show)                         \
static struct perf_pmu_events_attr _var = {                             \
        .attr = __ATTR(_name, 0444, _show, NULL),                       \
        .id   =  _id,                                                   \
};

#define PMU_FORMAT_ATTR(_name, _format)                                 \
static ssize_t                                                          \
_name##_show(struct device *dev,                                        \
             struct device_attribute *attr,                             \
             char *page)                                                \
{                                                                       \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
        return sprintf(page, _format "\n");                             \
}                                                                       \
                                                                        \
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)

#endif /* _LINUX_PERF_EVENT_H */
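
/*
 * Example use of the attribute helpers above (hypothetical driver code; the
 * "event" format name and my_pmu_* identifiers are made up for illustration):
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *
 *	static struct attribute *my_pmu_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 *
 *	static struct attribute_group my_pmu_format_group = {
 *		.name	= "format",
 *		.attrs	= my_pmu_format_attrs,
 *	};
 *
 * The resulting groups are exposed through pmu::attr_groups when the driver
 * calls perf_pmu_register().
 */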