#ifndef __LINUX_SMPLOCK_H
#define __LINUX_SMPLOCK_H

#ifdef CONFIG_LOCK_KERNEL
#include <linux/sched.h>
#include <linux/spinlock.h>

#define kernel_locked()		(current->lock_depth >= 0)
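/*
 * Note (not in the original header): current->lock_depth is -1 while
 * the task does not hold the Big Kernel Lock; lock_kernel() raises it
 * to 0 on first acquisition and counts recursive acquisitions from
 * there, so a non-negative depth means "this task owns the BKL".
 */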

extern int __lockfunc __reacquire_kernel_lock(void);
extern void __lockfunc __release_kernel_lock(void);

/*
 * Release/re-acquire global kernel lock for the scheduler
 */
#define release_kernel_lock(tsk) do {		\
	if (unlikely((tsk)->lock_depth >= 0))	\
		__release_kernel_lock();	\
} while (0)

/*
 * Non-SMP kernels will never block on the kernel lock,
 * so we are better off returning a constant zero from
 * reacquire_kernel_lock() so that the compiler can see
 * it at compile-time.
 */
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL)
# define return_value_on_smp return
#else
# define return_value_on_smp
#endif
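
/*
 * Note (not in the original header): with the "return" keyword folded
 * in above, the SMP && !PREEMPT_BKL build makes reacquire_kernel_lock()
 * below return the value of __reacquire_kernel_lock() directly; every
 * other CONFIG_LOCK_KERNEL build discards that value and falls through
 * to the constant "return 0", which the compiler can propagate.
 */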

static inline int reacquire_kernel_lock(struct task_struct *task)
{
	if (unlikely(task->lock_depth >= 0))
		return_value_on_smp __reacquire_kernel_lock();
	return 0;
}
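
/*
 * Illustrative sketch (not part of the original header): the scheduler
 * is the intended user of this pair.  Simplified, schedule() does
 * roughly:
 *
 *	release_kernel_lock(prev);	// drop the BKL, keep lock_depth
 *	// ...pick the next task and context-switch...
 *	if (unlikely(reacquire_kernel_lock(current) < 0))
 *		goto need_resched;	// hypothetical label; on SMP the
 *					// re-take can fail and be retried
 *
 * lock_depth is left untouched across the switch, so the lock is
 * transparently re-taken on the task's behalf when it runs again.
 */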

extern void __lockfunc lock_kernel(void)	__acquires(kernel_lock);
extern void __lockfunc unlock_kernel(void)	__releases(kernel_lock);
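
/*
 * Illustrative sketch (not part of the original header): lock_kernel()
 * nests via lock_depth, so an entry point can take the BKL without
 * caring whether a caller already holds it, e.g.:
 *
 *	lock_kernel();
 *	// ...touch state still guarded by the Big Kernel Lock...
 *	unlock_kernel();
 *
 * Sleeping while holding the BKL is allowed: release_kernel_lock() /
 * reacquire_kernel_lock() above hand it off across schedule().
 */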

#else

#define lock_kernel()				do { } while(0)
#define unlock_kernel()				do { } while(0)
#define release_kernel_lock(task)		do { } while(0)
#define reacquire_kernel_lock(task)		0
#define kernel_locked()				1
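
/*
 * Note (not in the original header): without CONFIG_LOCK_KERNEL the
 * kernel is uniprocessor and non-preemptible, so the BKL can never be
 * contended; the stubs compile away and kernel_locked() is trivially
 * true.
 */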

#endif /* CONFIG_LOCK_KERNEL */
#endif /* __LINUX_SMPLOCK_H */