/*
 * This file is part of ltrace.
 * Copyright (C) 2011,2012,2013 Petr Machata, Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#ifndef _LTRACE_LINUX_TRACE_H_
#define _LTRACE_LINUX_TRACE_H_

#include "proc.h"

/* This publishes some Linux-specific data structures used for process
 * handling.  */

/**
 * This is used for bookkeeping related to PIDs that the event
 * handlers work with.
 */
struct pid_task {
	pid_t pid;	/* This may be 0 for tasks that exited
			 * mid-handling.  */
	int sigstopped : 1;
	int got_event : 1;
	int delivered : 1;
	int vforked : 1;
	int sysret : 1;
};

struct pid_set {
	struct pid_task *tasks;
	size_t count;
	size_t alloc;
};
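
/* A pid_set is a growable array of pid_task entries.  A minimal
 * lookup sketch (find_task_info is a hypothetical helper, shown only
 * for illustration; the real bookkeeping lives in
 * sysdeps/linux-gnu/trace.c):
 *
 *	static struct pid_task *
 *	find_task_info(struct pid_set *pids, pid_t pid)
 *	{
 *		size_t i;
 *		for (i = 0; i < pids->count; ++i)
 *			if (pids->tasks[i].pid == pid)
 *				return &pids->tasks[i];
 *		return NULL;
 *	}
 */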

/**
 * Breakpoint re-enablement.  When we hit a breakpoint, we must
 * disable it, single-step over it, and re-enable it.  That single-step
 * may be done by only one task in a task group while the others are
 * stopped; otherwise the tasks would race over which of them sees the
 * breakpoint disabled and which doesn't.  The following keeps track
 * of all of this.
 */
struct process_stopping_handler
{
	struct event_handler super;

	/* The task that is doing the re-enablement.  */
	struct process *task_enabling_breakpoint;

	/* The breakpoint being re-enabled.  */
	struct breakpoint *breakpoint_being_enabled;

	/* Software singlestep breakpoints, if any needed.  */
	struct breakpoint *sws_bps[2];

	/* When all tasks are stopped, this callback gets called.  */
	void (*on_all_stopped)(struct process_stopping_handler *);

	/* When we get a singlestep event, this is called to decide
	 * whether to stop stepping, or whether to re-enable the
	 * breakpoint, sink remaining signals, and continue
	 * everyone.  */
	enum callback_status (*keep_stepping_p)
		(struct process_stopping_handler *);

	/* Whether we need to use an ugly workaround to get around
	 * various problems with singlestepping.  */
	enum callback_status (*ugly_workaround_p)
		(struct process_stopping_handler *);

	enum {
		/* We are waiting for everyone to land in t/T
		 * (tracing stop/stopped).  */
		PSH_STOPPING = 0,

		/* We are doing the PTRACE_SINGLESTEP.  */
		PSH_SINGLESTEP,

		/* We are waiting for all the SIGSTOPs to arrive so
		 * that we can sink them.  */
		PSH_SINKING,

		/* This is for tracking the ugly workaround.  */
		PSH_UGLY_WORKAROUND,
	} state;

	/* Set when this handler was installed because ltrace is
	 * exiting and detaching from the traced process.  */
	int exiting;

	/* Bookkeeping for the tasks this handler is tracking.  */
	struct pid_set pids;
};
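
/* The handler typically moves through the states above in order:
 * PSH_STOPPING while the SIGSTOPs sent to the other tasks take
 * effect, PSH_SINGLESTEP while one task steps over the disabled
 * breakpoint, and PSH_SINKING while the now-superfluous SIGSTOPs are
 * collected before everybody is continued.  PSH_UGLY_WORKAROUND is a
 * detour taken when plain single-stepping is not workable.  (This is
 * only a summary; the authoritative logic is in
 * sysdeps/linux-gnu/trace.c.)  */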

/* Allocate a process stopping handler, initialize it and install it.
 * Return 0 on success or a negative value on failure.  Pass NULL for
 * each callback to use a default instead.  The default for
 * ON_ALL_STOPPED is LINUX_PTRACE_DISABLE_AND_SINGLESTEP, the default
 * for KEEP_STEPPING_P and UGLY_WORKAROUND_P is "no".  */
int process_install_stopping_handler
	(struct process *proc, struct breakpoint *sbp,
	 void (*on_all_stopped)(struct process_stopping_handler *),
	 enum callback_status (*keep_stepping_p)
		 (struct process_stopping_handler *),
	 enum callback_status (*ugly_workaround_p)
		(struct process_stopping_handler *));
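
/* For example, a caller that only needs the default behavior would
 * pass NULL for all three callbacks (a sketch; the surrounding error
 * handling is up to the caller):
 *
 *	if (process_install_stopping_handler(proc, sbp,
 *					     NULL, NULL, NULL) < 0)
 *		fprintf(stderr, "couldn't install stopping handler\n");
 */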

/* These are suitable ON_ALL_STOPPED callbacks for the handler above;
 * linux_ptrace_disable_and_singlestep is the default.  */
void linux_ptrace_disable_and_singlestep(struct process_stopping_handler *self);
void linux_ptrace_disable_and_continue(struct process_stopping_handler *self);

/* When the main binary needs to call an IFUNC function defined in the
 * binary itself, a PLT entry is set up so that the dynamic linker can
 * get involved and resolve the symbol.  But unlike other PLT
 * relocations, this one can't rely on the symbol table being
 * available.  So it doesn't reference the symbol by its name, but by
 * its address, and correspondingly has a different relocation type.
 * When an arch backend wishes to support these IRELATIVE relocations,
 * it should override arch_elf_add_plt_entry and dispatch to this
 * function for IRELATIVE relocations (see the sketch below).
 *
 * This function behaves like arch_elf_add_plt_entry, except that it
 * doesn't take a name parameter; instead it looks the name up in the
 * symbol tables in LTE.  */
enum plt_status linux_elf_add_plt_entry_irelative(struct process *proc,
						  struct ltelf *lte,
						  GElf_Rela *rela, size_t ndx,
						  struct library_symbol **ret);
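
/* A sketch of such a dispatch for a hypothetical x86_64 backend.  The
 * relocation constant, the PLT_DEFAULT fallback (which requests the
 * generic handling), and the exact arch_elf_add_plt_entry signature
 * are assumptions to adapt per architecture:
 *
 *	#include <elf.h>
 *
 *	enum plt_status
 *	arch_elf_add_plt_entry(struct process *proc, struct ltelf *lte,
 *			       const char *a_name, GElf_Rela *rela,
 *			       size_t ndx, struct library_symbol **ret)
 *	{
 *		if (GELF_R_TYPE(rela->r_info) == R_X86_64_IRELATIVE)
 *			return linux_elf_add_plt_entry_irelative
 *				(proc, lte, rela, ndx, ret);
 *		return PLT_DEFAULT;
 *	}
 */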

/* Service routine of the above.  Determines a name corresponding to
 * ADDR, or invents a new one.  Returns NULL on failure; otherwise it
 * returns a malloc'd string that the caller is responsible for
 * freeing.  */
char *linux_elf_find_irelative_name(struct ltelf *lte, GElf_Addr addr);

/* Returns ${NAME}.IFUNC in a newly-malloc'd block, or NULL on
 * failure.  */
char *linux_append_IFUNC_to_name(const char *name);
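/* For example, given the name "strlen" this returns a freshly
 * malloc'd "strlen.IFUNC".  */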

/* Returns a statically allocated prototype that represents the
 * prototype "void *()".  Never fails.  */
struct prototype *linux_IFUNC_prototype(void);


#endif /* _LTRACE_LINUX_TRACE_H_ */