#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __kernel	__attribute__((address_space(0)))
# define __safe		__attribute__((safe))
# define __force	__attribute__((force))
# define __nocast	__attribute__((nocast))
# define __iomem	__attribute__((noderef, address_space(2)))
# define __must_hold(x)	__attribute__((context(x,1,1)))
# define __acquires(x)	__attribute__((context(x,0,1)))
# define __releases(x)	__attribute__((context(x,1,0)))
# define __acquire(x)	__context__(x,1)
# define __release(x)	__context__(x,-1)
# define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu	__attribute__((noderef, address_space(3)))
# define __pmem		__attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu		__attribute__((noderef, address_space(4)))
#else
# define __rcu
#endif
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
# define __user
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# define __pmem
#endif

/* Indirect macros required for expanded argument pasting, e.g. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif

/* The Intel compiler also defines __GNUC__, so we override
 * implementations coming from the above header files here.
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/* The Clang compiler also defines __GNUC__, so we override
 * implementations coming from the above header files here.
 */
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif

/*
 * Generic compiler-dependent macros required for the kernel build go
 * below this comment. Actual compiler- and compiler-version-specific
 * implementations come from the header files above.
 */

struct ftrace_branch_data {
	const char *func;
	const char *file;
	unsigned line;
	union {
		struct {
			unsigned long correct;
			unsigned long incorrect;
		};
		struct {
			unsigned long miss;
			unsigned long hit;
		};
		unsigned long miss_hit[2];
	};
};

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect) ({					\
			int ______r;					\
			static struct ftrace_branch_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
			______r = likely_notrace(x);			\
			ftrace_likely_update(&______f, ______r, expect); \
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
# endif
# ifndef unlikely
#  define unlikely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
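
/*
 * Example (an illustrative sketch, not part of this header; dev and
 * dev_is_dead() are hypothetical): annotate branches whose direction
 * is heavily biased, so the compiler lays the hot path out straight
 * and, with branch profiling enabled, the guess is recorded:
 *
 *	if (unlikely(dev_is_dead(dev)))
 *		return -ENODEV;
 *	if (likely(dev->buf))
 *		process(dev->buf);
 */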

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
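
/*
 * Example (illustrative; assumes the default definition above): in a
 * declaration expanded at, say, line 42 of a source file,
 *
 *	static int __UNIQUE_ID(guard);
 *
 * pastes to 'static int __UNIQUE_ID_guard42;'.  Two expansions on the
 * same line produce the same name, hence "not-quite-unique".
 */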

#include <linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * This function is not 'inline' because __no_sanitize_address conflicts
 * with inlining. Attempting to inline it may cause a build failure:
 * 	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#endif

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering.  One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE, these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy().
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
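
/*
 * Example (an illustrative sketch; 'done' and the functions below are
 * hypothetical): use case (1) above, a process-level loop and an irq
 * handler sharing a flag on the same CPU.  READ_ONCE()/WRITE_ONCE()
 * keep the compiler from caching the flag in a register or tearing
 * the accesses:
 *
 *	static int done;
 *
 *	void my_irq_handler(void)		(irq context)
 *	{
 *		WRITE_ONCE(done, 1);
 *	}
 *
 *	void wait_for_irq(void)			(process level)
 *	{
 *		while (!READ_ONCE(done))
 *			cpu_relax();
 *	}
 */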

/**
 * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering
 * @cond: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 *
 * The control dependency provides a LOAD->STORE order, the additional RMB
 * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
 * aka. ACQUIRE.
 */
#define smp_cond_acquire(cond)	do {		\
	while (!(cond))				\
		cpu_relax();			\
	smp_rmb(); /* ctrl + rmb := acquire */	\
} while (0)
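
/*
 * Example (an illustrative sketch; 'state' and STATE_READY are
 * hypothetical): spin until another CPU publishes STATE_READY, with
 * everything after the macro ordered after the load that observed it:
 *
 *	smp_cond_acquire(READ_ONCE(state) == STATE_READY);
 *	consume_published_data();
 */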

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the function's removal.
 * Usage is:
 * 		int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated		/* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file.  As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >=3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used			/* unimplemented */
#endif

#ifndef __maybe_unused
# define __maybe_unused		/* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused	/* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead, for documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value.  Basically this is
 * just slightly more strict class than the `pure' attribute above,
 * since function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'.  Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'.  It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__	/* unimplemented */
#endif

/*
 * Tell gcc if a function is cold. The compiler will assume any path
 * directly leading to the call is unlikely.
 */

#ifndef __cold
#define __cold
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif

#ifndef __visible
#define __visible
#endif

/*
 * Assume alignment of return value.
 */
#ifndef __assume_aligned
#define __assume_aligned(a, ...)
#endif

/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")
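
/*
 * Example (illustrative; struct my_hdr is hypothetical): the condition
 * must be a compile-time constant, so sizeof-based invariants work well:
 *
 *	compiletime_assert(sizeof(struct my_hdr) == 64,
 *			   "struct my_hdr must stay exactly 64 bytes");
 *
 * and the barrier primitives use the _atomic_type form to reject
 * operands that cannot be loaded/stored in one native-word access:
 *
 *	compiletime_assert_atomic_type(*ptr);
 */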

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
	(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
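
/*
 * Example (illustrative; shared_seq is a hypothetical scalar updated by
 * another context): force a fresh load on every iteration rather than
 * letting the compiler hoist the load out of the loop:
 *
 *	while (ACCESS_ONCE(shared_seq) == seq)
 *		cpu_relax();
 *
 * New code would write READ_ONCE(shared_seq) instead, per the note
 * above.
 */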

/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 */
#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})
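
/*
 * Example (an illustrative sketch; gp, struct foo and the publisher are
 * hypothetical): the writer initializes the object, then publishes it
 * with smp_store_release(&gp, p); readers then do:
 *
 *	struct foo *p = lockless_dereference(gp);
 *	if (p)
 *		use(p->field);
 *
 * so that, even on DEC Alpha, the dependent load of p->field sees the
 * writer's initialization.
 */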

/* Ignore/forbid kprobes attach on very low-level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes	__attribute__((__section__(".kprobes.text")))
# define nokprobe_inline	__always_inline
#else
# define __kprobes
# define nokprobe_inline	inline
#endif
#endif /* __LINUX_COMPILER_H */