/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

#ifdef CONFIG_LOCKDEP

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit
{
	LOCK_USED = 0,
	LOCK_USED_IN_HARDIRQ,
	LOCK_USED_IN_SOFTIRQ,
	LOCK_ENABLED_SOFTIRQS,
	LOCK_ENABLED_HARDIRQS,
	LOCK_USED_IN_HARDIRQ_READ,
	LOCK_USED_IN_SOFTIRQ_READ,
	LOCK_ENABLED_SOFTIRQS_READ,
	LOCK_ENABLED_HARDIRQS_READ,
	LOCK_USAGE_STATES
};

/*
 * Usage-state bitmasks:
 */
#define LOCKF_USED			(1 << LOCK_USED)
#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)

#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)

#define LOCKF_ENABLED_IRQS_READ \
		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

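/*
 * Illustrative sketch (simplified; not the exact in-tree check): the
 * validator combines these masks to catch IRQ-safety inversions, e.g.
 * a class that has been used in hardirq context must never also be
 * held with hardirqs enabled:
 *
 *	if ((mask & LOCKF_USED_IN_HARDIRQ) && (mask & LOCKF_ENABLED_HARDIRQS))
 *		...report a potential deadlock...
 */
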
#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
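
/*
 * Example (illustrative; 'my_driver_lock_key' is a hypothetical name):
 * a statically allocated key lives in .data and gives every lock
 * registered against it the same class:
 *
 *	static struct lock_class_key my_driver_lock_key;
 */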

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, used during certain classes of graph
	 * walking to ensure that we check each node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;
};

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
};
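
/*
 * Illustrative sketch (simplified; not the exact in-tree definition):
 * lock types embed a lockdep_map when lock debugging is configured,
 * roughly like:
 *
 *	typedef struct {
 *		raw_spinlock_t		raw_lock;
 *	#ifdef CONFIG_DEBUG_LOCK_ALLOC
 *		struct lockdep_map	dep_map;
 *	#endif
 *	} spinlock_t;
 */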

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	struct list_head		entry;
	u64				chain_key;
};

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * for the validator to achieve 100% coverage it is absolutely
	 * critical that every unique dependency path that can occur
	 * in the system gets a unique hash value - hence the 64-bit
	 * width, which makes collisions as unlikely as possible.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	struct lock_class		*class;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;

	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	int				irq_context;
	int				trylock;
	int				read;
	int				check;
	int				hardirqs_off;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);

extern void lockdep_off(void);
extern void lockdep_on(void);
extern int lockdep_internal(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key);

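/*
 * Example (illustrative sketch; 'my_lock' and 'my_lock_key' are
 * hypothetical - a custom locking primitive with an embedded
 * lockdep_map registering itself with the validator):
 *
 *	static struct lock_class_key my_lock_key;
 *
 *	lockdep_init_map(&my_lock->dep_map, "my_lock", &my_lock_key);
 */
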
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key)
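
/*
 * Example (illustrative; 'nested' and 'inner_lock_key' are
 * hypothetical): split one instance of a lock into its own class, so
 * that a known-safe nesting of two same-type locks is not reported:
 *
 *	static struct lock_class_key inner_lock_key;
 *
 *	mutex_init(&nested->lock);
 *	lockdep_set_class(&nested->lock, &inner_lock_key);
 */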

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for "check":
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

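/*
 * Example (illustrative sketch of a write-mode, full-validation
 * annotation pair, assuming _RET_IP_ from <linux/kernel.h> for the
 * caller's return address):
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 0, 2, _RET_IP_);
 *	... actually take the lock ...
 *	lock_release(&lock->dep_map, 0, _RET_IP_);
 */
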
# define INIT_LOCKDEP				.lockdep_recursion = 0,

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline int lockdep_internal(void)
{
	return 0;
}

# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key)	do { (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); } while (0)
# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };
#endif /* !LOCKDEP */

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
extern void early_init_irq_lock_class(void);
#else
# define early_init_irq_lock_class()		do { } while (0)
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
#else
# define early_boot_irqs_off()			do { } while (0)
# define early_boot_irqs_on()			do { } while (0)
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

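/*
 * Example (illustrative; 'parent' and 'child' are hypothetical,
 * assuming the spin_lock_nested() API available with lock debugging):
 * take two locks of the same class in a known, safe parent->child
 * order by putting the inner lock into subclass 1:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */
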
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

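/*
 * Illustrative sketch (not the exact in-tree code): a spinlock wrapper
 * threads these events through around the low-level lock operation:
 *
 *	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *	_raw_spin_lock(lock);
 *	...
 *	spin_release(&lock->dep_map, 1, _RET_IP_);
 *	_raw_spin_unlock(lock);
 */
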
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif
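
/*
 * Note the "read" argument above: rwlock_acquire_read() passes read=2
 * because rwlock read-locks may recurse on the same instance, while
 * rwsem_acquire_read() passes read=1 because down_read() must not
 * recurse (see the lock_acquire() documentation above).
 */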

#endif /* __LINUX_LOCKDEP_H */