#ifndef JEMALLOC_INTERNAL_ATOMIC_MSVC_H
#define JEMALLOC_INTERNAL_ATOMIC_MSVC_H

#define ATOMIC_INIT(...) {__VA_ARGS__}

typedef enum {
	atomic_memory_order_relaxed,
	atomic_memory_order_acquire,
	atomic_memory_order_release,
	atomic_memory_order_acq_rel,
	atomic_memory_order_seq_cst
} atomic_memory_order_t;
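
/*
 * These mirror C11's memory_order_* values.  Illustrative call (a
 * sketch; the atomic_load_u32 name assumes a uint32_t instantiation
 * of the generator macros defined below):
 *
 *	uint32_t v = atomic_load_u32(&x, atomic_memory_order_acquire);
 */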

/*
 * The representation type for each power-of-two size: lg_size is the
 * base-2 log of the operand size in bytes, so 0 -> 1 byte, 1 -> 2
 * bytes, 2 -> 4 bytes, and 3 -> 8 bytes.  These match the operand
 * types of the corresponding _Interlocked* intrinsics.
 */
typedef char atomic_repr_0_t;
typedef short atomic_repr_1_t;
typedef long atomic_repr_2_t;
typedef __int64 atomic_repr_3_t;

ATOMIC_INLINE void
atomic_fence(atomic_memory_order_t mo) {
	_ReadWriteBarrier();
#  if defined(_M_ARM) || defined(_M_ARM64)
	/* ARM needs a barrier for everything but relaxed. */
	if (mo != atomic_memory_order_relaxed) {
		MemoryBarrier();
	}
#  elif defined(_M_IX86) || defined(_M_X64)
	/* x86 needs a barrier only for seq_cst. */
	if (mo == atomic_memory_order_seq_cst) {
		MemoryBarrier();
	}
#  else
#  error "Don't know how to create atomics for this platform for MSVC."
#  endif
	_ReadWriteBarrier();
}
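
/*
 * Sketch of how the fence composes with the plain accesses in the
 * generated functions below: the _ReadWriteBarrier() pair constrains
 * only the compiler, while MemoryBarrier() additionally orders the
 * hardware where the ISA requires it.  The tail of a seq_cst store,
 * for example, is:
 *
 *	a->repr = val;                              <- plain store
 *	atomic_fence(atomic_memory_order_seq_cst);  <- full barrier
 */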

#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t

#define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b)
#define ATOMIC_RAW_CONCAT(a, b) a ## b

#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT(	\
    base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size))

#define ATOMIC_INTERLOCKED_SUFFIX(lg_size)				\
    ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size)

#define ATOMIC_INTERLOCKED_SUFFIX_0 8
#define ATOMIC_INTERLOCKED_SUFFIX_1 16
/* The 32-bit intrinsics carry no size suffix, so lg_size 2 maps to "". */
#define ATOMIC_INTERLOCKED_SUFFIX_2
#define ATOMIC_INTERLOCKED_SUFFIX_3 64
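
/*
 * Example expansions (illustrative):
 *
 *	ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, 0)
 *	    -> _InterlockedExchange8
 *	ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, 2)
 *	    -> _InterlockedExchange   (the 32-bit intrinsic is unsuffixed)
 *	ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, 3)
 *	    -> _InterlockedExchange64
 */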

#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size)		\
typedef struct {							\
	ATOMIC_INTERLOCKED_REPR(lg_size) repr;				\
} atomic_##short_type##_t;						\
									\
ATOMIC_INLINE type							\
atomic_load_##short_type(const atomic_##short_type##_t *a,		\
    atomic_memory_order_t mo) {						\
	ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr;			\
	if (mo != atomic_memory_order_relaxed) {			\
		atomic_fence(atomic_memory_order_acquire);		\
	}								\
	return (type)ret;						\
}									\
									\
ATOMIC_INLINE void							\
atomic_store_##short_type(atomic_##short_type##_t *a,			\
    type val, atomic_memory_order_t mo) {				\
	if (mo != atomic_memory_order_relaxed) {			\
		atomic_fence(atomic_memory_order_release);		\
	}								\
	a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size))val;		\
	if (mo == atomic_memory_order_seq_cst) {			\
		atomic_fence(atomic_memory_order_seq_cst);		\
	}								\
}									\
									\
ATOMIC_INLINE type							\
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {						\
	return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange,	\
	    lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val);	\
}									\
									\
ATOMIC_INLINE bool							\
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a,	\
    type *expected, type desired, atomic_memory_order_t success_mo,	\
    atomic_memory_order_t failure_mo) {					\
	ATOMIC_INTERLOCKED_REPR(lg_size) e =				\
	    (ATOMIC_INTERLOCKED_REPR(lg_size))*expected;		\
	ATOMIC_INTERLOCKED_REPR(lg_size) d =				\
	    (ATOMIC_INTERLOCKED_REPR(lg_size))desired;			\
	ATOMIC_INTERLOCKED_REPR(lg_size) old =				\
	    ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange,	\
		lg_size)(&a->repr, d, e);				\
	if (old == e) {							\
		return true;						\
	} else {							\
		*expected = (type)old;					\
		return false;						\
	}								\
}									\
									\
ATOMIC_INLINE bool							\
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a,	\
    type *expected, type desired, atomic_memory_order_t success_mo,	\
    atomic_memory_order_t failure_mo) {					\
	/*								\
	 * The weak version above already has strong semantics (the	\
	 * Interlocked intrinsics never fail spuriously), so simply	\
	 * delegate to it.						\
	 */								\
	return atomic_compare_exchange_weak_##short_type(a, expected,	\
	    desired, success_mo, failure_mo);				\
}
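
/*
 * Example instantiation (a sketch; the actual invocations live
 * elsewhere in jemalloc):
 *
 *	JEMALLOC_GENERATE_ATOMICS(bool, b, 0)
 *
 * generates atomic_b_t plus atomic_load_b(), atomic_store_b(),
 * atomic_exchange_b(), atomic_compare_exchange_weak_b(), and
 * atomic_compare_exchange_strong_b().
 */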
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size)	\
JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size)			\
									\
ATOMIC_INLINE type							\
atomic_fetch_add_##short_type(atomic_##short_type##_t *a,		\
    type val, atomic_memory_order_t mo) {				\
	return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd,	\
	    lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val);	\
}									\
									\
ATOMIC_INLINE type							\
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a,		\
    type val, atomic_memory_order_t mo) {				\
	/*								\
	 * MSVC warns on negation of unsigned operands, but for us it	\
	 * gives exactly the right semantics (MAX_TYPE + 1 - operand).	\
	 */								\
	__pragma(warning(push))						\
	__pragma(warning(disable: 4146))				\
	return atomic_fetch_add_##short_type(a, -val, mo);		\
	__pragma(warning(pop))						\
}									\
									\
ATOMIC_INLINE type							\
atomic_fetch_and_##short_type(atomic_##short_type##_t *a,		\
    type val, atomic_memory_order_t mo) {				\
	return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedAnd, lg_size)(\
	    &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val);		\
}									\
									\
ATOMIC_INLINE type							\
atomic_fetch_or_##short_type(atomic_##short_type##_t *a,		\
    type val, atomic_memory_order_t mo) {				\
	return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedOr, lg_size)(	\
	    &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val);		\
}									\
									\
ATOMIC_INLINE type							\
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a,		\
    type val, atomic_memory_order_t mo) {				\
	return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedXor, lg_size)(\
	    &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val);		\
}
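
/*
 * Usage sketch (hypothetical; assumes an instantiation such as
 * JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2) elsewhere):
 *
 *	atomic_u32_t nrequests = ATOMIC_INIT(0);
 *	uint32_t prev = atomic_fetch_add_u32(&nrequests, 1,
 *	    atomic_memory_order_relaxed);
 *
 * prev holds the value before the increment.
 */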

#endif /* JEMALLOC_INTERNAL_ATOMIC_MSVC_H */