#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes.  Readers never
 * block but they may have to retry if a writer is in
 * progress. Writers do not wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
 *
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>

typedef struct {
	unsigned sequence;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems.  We think these are
 * OK now.  Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname) \
		 { 0, __SPIN_LOCK_UNLOCKED(lockname) }

#define SEQLOCK_UNLOCKED \
		 __SEQLOCK_UNLOCKED(old_style_seqlock_init)

#define seqlock_init(x) \
		do { *(x) = (seqlock_t) __SEQLOCK_UNLOCKED(x); } while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
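
/*
 * Illustrative sketch (not part of the original header): the usual ways to
 * initialize a seqlock.  "foo_lock", "bar" and "baz" are hypothetical names.
 *
 *	static DEFINE_SEQLOCK(foo_lock);	// static definition + init
 *
 *	seqlock_t bar = SEQLOCK_UNLOCKED;	// old-style static initializer
 *
 *	seqlock_t baz;
 *	seqlock_init(&baz);			// runtime initialization
 */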

/* Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * preempt_disable() is not needed because spin_lock() already disables
 * preemption.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	++sl->sequence;
	smp_wmb();
}

static inline void write_sequnlock(seqlock_t *sl)
{
	smp_wmb();
	sl->sequence++;
	spin_unlock(&sl->lock);
}

static inline int write_tryseqlock(seqlock_t *sl)
{
	int ret = spin_trylock(&sl->lock);

	if (ret) {
		++sl->sequence;
		smp_wmb();
	}
	return ret;
}
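
/*
 * Illustrative writer-side sketch (hypothetical "foo_lock"/"foo_data" names,
 * not part of the original header).  The update is bracketed by
 * write_seqlock()/write_sequnlock(), which also serializes writers:
 *
 *	write_seqlock(&foo_lock);
 *	foo_data.a = new_a;
 *	foo_data.b = new_b;
 *	write_sequnlock(&foo_lock);
 */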

/* Start of read calculation -- fetch last complete writer token */
static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
{
	unsigned ret = sl->sequence;
	smp_rmb();
	return ret;
}

/* Test if reader processed invalid data.
 * If the initial value is odd,
 *	then a writer had already started when the section was entered.
 * If the sequence value changed,
 *	then a writer changed the data while in the section.
 *
 * Using xor saves one conditional branch.
 */
static __always_inline int read_seqretry(const seqlock_t *sl, unsigned iv)
{
	smp_rmb();
	return (iv & 1) | (sl->sequence ^ iv);
}
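
/*
 * Illustrative reader-side sketch (hypothetical names): copy the protected
 * data out and retry if a writer was active during the read section.
 *
 *	unsigned seq;
 *	do {
 *		seq = read_seqbegin(&foo_lock);
 *		a = foo_data.a;
 *		b = foo_data.b;
 *	} while (read_seqretry(&foo_lock, seq));
 */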


/*
 * Version using a sequence counter only.
 * This can be used when the code has its own mutex protecting the update,
 * held from before the write_seqcount_begin() until after the
 * write_seqcount_end().
 */

typedef struct seqcount {
	unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x)	do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)

/* Start of read using pointer to a sequence counter only.  */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = s->sequence;
	smp_rmb();
	return ret;
}

/* Test if reader processed invalid data.
 * Equivalent to: iv is odd or sequence number has changed.
 *                (iv & 1) || (*s != iv)
 * Using xor saves one conditional branch.
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned iv)
{
	smp_rmb();
	return (iv & 1) | (s->sequence ^ iv);
}


/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}
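
/*
 * Illustrative seqcount sketch (hypothetical "foo_mutex"/"foo_seq" names):
 * the caller's own lock serializes writers, while the seqcount lets lockless
 * readers detect a concurrent update.
 *
 * Writer:
 *	mutex_lock(&foo_mutex);
 *	write_seqcount_begin(&foo_seq);
 *	foo_data.a = new_a;
 *	write_seqcount_end(&foo_seq);
 *	mutex_unlock(&foo_mutex);
 *
 * Reader:
 *	do {
 *		seq = read_seqcount_begin(&foo_seq);
 *		a = foo_data.a;
 *	} while (read_seqcount_retry(&foo_seq, seq));
 */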

/*
 * Possible sw/hw IRQ protected versions of the interfaces.
 */
#define write_seqlock_irqsave(lock, flags)				\
	do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock)						\
	do { local_irq_disable();   write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock)						\
	do { local_bh_disable();    write_seqlock(lock); } while (0)

#define write_sequnlock_irqrestore(lock, flags)				\
	do { write_sequnlock(lock); local_irq_restore(flags); } while (0)
#define write_sequnlock_irq(lock)					\
	do { write_sequnlock(lock); local_irq_enable(); } while (0)
#define write_sequnlock_bh(lock)					\
	do { write_sequnlock(lock); local_bh_enable(); } while (0)

#define read_seqbegin_irqsave(lock, flags)				\
	({ local_irq_save(flags);   read_seqbegin(lock); })

#define read_seqretry_irqrestore(lock, iv, flags)			\
	({								\
		int ret = read_seqretry(lock, iv);			\
		local_irq_restore(flags);				\
		ret;							\
	})
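
/*
 * Illustrative sketch of the IRQ-safe reader helpers (hypothetical names):
 * read_seqbegin_irqsave() saves and disables local interrupts before starting
 * the read section; read_seqretry_irqrestore() checks for a retry and then
 * restores them.
 *
 *	unsigned long flags;
 *	unsigned seq;
 *	do {
 *		seq = read_seqbegin_irqsave(&foo_lock, flags);
 *		a = foo_data.a;
 *	} while (read_seqretry_irqrestore(&foo_lock, seq, flags));
 */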

#endif /* __LINUX_SEQLOCK_H */