#ifndef _LINUX_JIFFIES_H
#define _LINUX_JIFFIES_H

#include <linux/calc64.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <asm/param.h>			/* for HZ */

/*
 * The following defines establish the engineering parameters of the PLL
 * model. The HZ variable establishes the timer interrupt frequency, 100 Hz
 * for the SunOS kernel, 256 Hz for the Ultrix kernel and 1024 Hz for the
 * OSF/1 kernel. The SHIFT_HZ define expresses the same value as the
 * nearest power of two in order to avoid hardware multiply operations.
 */
#if HZ >= 12 && HZ < 24
# define SHIFT_HZ	4
#elif HZ >= 24 && HZ < 48
# define SHIFT_HZ	5
#elif HZ >= 48 && HZ < 96
# define SHIFT_HZ	6
#elif HZ >= 96 && HZ < 192
# define SHIFT_HZ	7
#elif HZ >= 192 && HZ < 384
# define SHIFT_HZ	8
#elif HZ >= 384 && HZ < 768
# define SHIFT_HZ	9
#elif HZ >= 768 && HZ < 1536
# define SHIFT_HZ	10
#else
# error You lose.
#endif
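
/*
 * Illustrative note (not part of the original header): the ladder above
 * picks the exponent of the power of two nearest to HZ, e.g. HZ = 100
 * selects SHIFT_HZ = 7 (2^7 = 128) and HZ = 1000 selects SHIFT_HZ = 10
 * (2^10 = 1024).
 */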

/* LATCH is used in the interval timer and ftape setup. */
#define LATCH  ((CLOCK_TICK_RATE + HZ/2) / HZ)	/* For divider */

#define LATCH_HPET ((HPET_TICK_RATE + HZ/2) / HZ)

/* Suppose we want to divide two numbers NOM and DEN: NOM/DEN; then we can
 * improve accuracy by shifting LSH bits, hence calculating:
 *     (NOM << LSH) / DEN
 * This, however, means trouble for large NOM, because (NOM << LSH) may no
 * longer fit in 32 bits. The following way of calculating this gives us
 * some slack, under the following conditions:
 *   - (NOM / DEN) fits in (32 - LSH) bits.
 *   - (NOM % DEN) fits in (32 - LSH) bits.
 */
#define SH_DIV(NOM,DEN,LSH) (   (((NOM) / (DEN)) << (LSH))              \
                             + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))
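
/*
 * Worked example (illustrative, not from the original header): SH_DIV
 * returns NOM/DEN as a fixed-point value with LSH fractional bits while
 * keeping the intermediate products small, e.g.
 *     SH_DIV(7, 2, 4) = ((7/2) << 4) + (((7%2) << 4) + 2/2) / 2
 *                     = 48 + 8 = 56,
 * which is 3.5 expressed with 4 fractional bits (56 / 16 = 3.5).
 */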

/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))

#define ACTHZ_HPET (SH_DIV (HPET_TICK_RATE, LATCH_HPET, 8))

/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */
#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8))

#define TICK_NSEC_HPET (SH_DIV(1000000UL * 1000, ACTHZ_HPET, 8))
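
/*
 * Illustrative numbers (assuming the classic i386 PIT, where CLOCK_TICK_RATE
 * is 1193182 Hz, and HZ = 100): LATCH = (1193182 + 50) / 100 = 11932, so the
 * real interrupt rate is 1193182 / 11932, about 99.998 Hz, and
 * ACTHZ = SH_DIV(1193182, 11932, 8) = 25600, i.e. roughly 100.0 in 8-bit
 * fixed point.  TICK_NSEC then works out to 10000000 ns (10 ms per tick).
 */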

/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)

/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ
 * and a value TUSEC for TICK_USEC (can be set by adjtimex)
 */
#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8))

/* some arches have a small-data section that can be accessed register-relative,
 * but that can only take up to, say, 4-byte variables. jiffies, being part of
 * an 8-byte variable, may not be correctly accessed unless we force the issue.
 */
#define __jiffy_data  __attribute__((section(".data")))

/*
 * The 64-bit value is not volatile - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * get_jiffies_64() will do this for you as appropriate.
 */
extern u64 __jiffy_data jiffies_64;
extern unsigned long volatile __jiffy_data jiffies;

#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
#else
static inline u64 get_jiffies_64(void)
{
	return (u64)jiffies;
}
#endif
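
/*
 * Usage sketch (illustrative, not part of the original header): on 32-bit
 * machines always go through the accessor so the 64-bit read is consistent:
 *
 *	u64 now = get_jiffies_64();
 *
 * Reading jiffies_64 directly on such machines can observe a torn value
 * while the low word carries into the high word.
 */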

/*
 *	These inlines deal with timer wrapping correctly. You are
 *	strongly encouraged to use them:
 *	1. Because people otherwise forget
 *	2. Because if the timer wrap changes in the future you won't have to
 *	   alter your driver code.
 *
 * time_after(a,b) returns true if the time a is after time b.
 *
 * Do this with "<0" and ">=0" to only test the sign of the result. A
 * good compiler would generate better code (and a really good compiler
 * wouldn't care). Gcc is currently neither.
 */
#define time_after(a,b)		\
	(typecheck(unsigned long, a) && \
	 typecheck(unsigned long, b) && \
	 ((long)(b) - (long)(a) < 0))
#define time_before(a,b)	time_after(b,a)

#define time_after_eq(a,b)	\
	(typecheck(unsigned long, a) && \
	 typecheck(unsigned long, b) && \
	 ((long)(a) - (long)(b) >= 0))
#define time_before_eq(a,b)	time_after_eq(b,a)
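
/*
 * Usage sketch (illustrative only; "timeout" and "hardware_ready()" are
 * made-up names):
 *
 *	unsigned long timeout = jiffies + HZ;	(i.e. one second from now)
 *	while (!hardware_ready()) {
 *		if (time_after(jiffies, timeout))
 *			return -ETIMEDOUT;
 *		cpu_relax();
 *	}
 *
 * Because the macro tests the sign of the difference, the comparison keeps
 * working across a jiffies wrap, unlike a plain "jiffies > timeout" test.
 */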

/*
 * Have the 32-bit jiffies value wrap 5 minutes after boot
 * so jiffies wrap bugs show up earlier.
 */
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
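
/*
 * Worked example (illustrative): with HZ = 1000 the initializer is
 * (unsigned int)(-300000) = 4294667296, i.e. 300000 ticks short of 2^32,
 * so the 32-bit counter wraps to zero 300 seconds after boot.
 */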

/*
 * Change timeval to jiffies, trying to avoid the
 * most obvious overflows..
 *
 * And some not so obvious.
 *
 * Note that we don't want to return MAX_LONG, because
 * for various timeout reasons we often end up having
 * to wait "jiffies+1" in order to guarantee that we wait
 * at _least_ "jiffies" - so "jiffies+1" had better still
 * be positive.
 */
#define MAX_JIFFY_OFFSET ((~0UL >> 1)-1)

/*
 * We want to do realistic conversions of time so we need to use the same
 * values the update wall clock code uses as the jiffies size.  This value
 * is: TICK_NSEC (which is defined in timex.h).  This
 * is a constant and is in nanoseconds.  We will use scaled math
 * with a set of scales defined here as SEC_JIFFIE_SC, USEC_JIFFIE_SC and
 * NSEC_JIFFIE_SC.  Note that these defines contain nothing but
 * constants and so are computed at compile time.  SHIFT_HZ (computed in
 * timex.h) adjusts the scaling for different HZ values.
 *
 * Scaled math???  What is that?
 *
 * Scaled math is a way to do integer math on values that would,
 * otherwise, either overflow, underflow, or cause undesired div
 * instructions to appear in the execution path.  In short, we "scale"
 * up the operands so they take more bits (more precision, less
 * underflow), do the desired operation and then "scale" the result back
 * by the same amount.  If we do the scaling by shifting we avoid the
 * costly mpy and the dastardly div instructions.
 *
 * Suppose, for example, we want to convert from seconds to jiffies
 * where jiffies is defined in nanoseconds as NSEC_PER_JIFFIE.  The
 * simple math is: jiff = (sec * NSEC_PER_SEC) / NSEC_PER_JIFFIE.  We
 * observe that (NSEC_PER_SEC / NSEC_PER_JIFFIE) is a constant which we
 * might calculate at compile time, however, the result will only have
 * about 3-4 bits of precision (less for smaller values of HZ).
 *
 * So, we scale as follows:
 * jiff = (sec) * (NSEC_PER_SEC / NSEC_PER_JIFFIE);
 * jiff = ((sec) * ((NSEC_PER_SEC * SCALE)/ NSEC_PER_JIFFIE)) / SCALE;
 * Then we make SCALE a power of two so:
 * jiff = ((sec) * ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) >> SCALE;
 * Now we define:
 * #define SEC_CONV ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)
 * jiff = (sec * SEC_CONV) >> SCALE;
 *
 * Often the math we use will expand beyond 32-bits so we tell C how to
 * do this and pass the 64-bit result of the mpy through the ">> SCALE"
 * which should take the result back to 32-bits.  We want this expansion
 * to capture as much precision as possible.  At the same time we don't
 * want to overflow so we pick the SCALE to avoid this.  In this file,
 * that means using a different scale for each range of HZ values (as
 * defined in timex.h).
 *
 * For those who want to know, gcc will give a 64-bit result from a "*"
 * operator if the result is a long long AND at least one of the
 * operands is cast to long long (usually just prior to the "*" so as
 * not to confuse it into thinking it really has a 64-bit operand,
 * which, by the way, it can do, but it takes more code and at least 2
 * mpys).
 *
 * We also need to be aware that one second in nanoseconds is only a
 * couple of bits away from overflowing a 32-bit word, so we MUST use
 * 64 bits to hold the full range of time in nanoseconds.
 *
 */
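
/*
 * Small self-contained illustration of the trick (made-up numbers, not the
 * kernel constants): to multiply by 3/7 without a run-time divide,
 * precompute the scaled constant (3 << 16) / 7 = 28086 once, then do
 *
 *	result = (x * 28086) >> 16;
 *
 * For x = 1000 this gives 28086000 >> 16 = 428, matching 1000 * 3 / 7 = 428
 * to within the precision of the 16-bit scale.  The conversions below do
 * the same thing with SEC_JIFFIE_SC, USEC_JIFFIE_SC and NSEC_JIFFIE_SC as
 * the scale and 64-bit intermediates to hold the products.
 */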

/*
 * Here are the scales we will use.  One for seconds, one for nanoseconds
 * and one for microseconds.
 *
 * Within the limits of cpp we do a rough cut at the SEC_JIFFIE_SC and
 * check if the sign bit is set.  If not, we bump the shift count by 1.
 * (Gets an extra bit of precision where we can use it.)
 * We know it is set for HZ = 1024 and HZ = 100, not for 1000.
 * Haven't tested others.
 *
 * The limits of cpp (for #if expressions) allow only long (no long long),
 * but then we only need the most significant bit.
 */

#define SEC_JIFFIE_SC (31 - SHIFT_HZ)
#if !((((NSEC_PER_SEC << 2) / TICK_NSEC) << (SEC_JIFFIE_SC - 2)) & 0x80000000)
#undef SEC_JIFFIE_SC
#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
#endif
#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
                                TICK_NSEC -1) / (u64)TICK_NSEC))

#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
                                        TICK_NSEC -1) / (u64)TICK_NSEC))
#define USEC_CONVERSION  \
                    ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
                                        TICK_NSEC -1) / (u64)TICK_NSEC))
/*
 * USEC_ROUND is used in the timeval to jiffie conversion.  See there
 * for more details.  It is the scaled resolution rounding value.  Note
 * that it is a 64-bit value.  Since, when it is applied, we are already
 * in jiffies (albeit scaled), it is nothing but the bits we will shift
 * off.
 */
#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
/*
 * The maximum jiffie value is (MAX_INT >> 1).  Here we translate that
 * into seconds.  The 64-bit case will overflow if we are not careful,
 * so use the messy SH_DIV macro to do it.  Still all constants.
 */
#if BITS_PER_LONG < 64
# define MAX_SEC_IN_JIFFIES \
	(long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC)
#else	/* take care of overflow on 64-bit machines */
# define MAX_SEC_IN_JIFFIES \
	(SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1)

#endif

/*
 * Convert jiffies to milliseconds and back.
 *
 * Avoid unnecessary multiplications/divisions in the
 * two most common HZ cases:
 */
static inline unsigned int jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
	return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
	return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
	return (j * MSEC_PER_SEC) / HZ;
#endif
}

static inline unsigned int jiffies_to_usecs(const unsigned long j)
{
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
	return (USEC_PER_SEC / HZ) * j;
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
	return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
#else
	return (j * USEC_PER_SEC) / HZ;
#endif
}
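
/*
 * Illustrative numbers (assuming HZ = 100, so MSEC_PER_SEC % HZ == 0): the
 * first branch is taken and jiffies_to_msecs(j) reduces to 10 * j, e.g.
 * jiffies_to_msecs(50) = 500; likewise jiffies_to_usecs(j) is 10000 * j.
 */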

static inline unsigned long msecs_to_jiffies(const unsigned int m)
{
	if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
		return MAX_JIFFY_OFFSET;
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
	return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
	return m * (HZ / MSEC_PER_SEC);
#else
	return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
#endif
}

static inline unsigned long usecs_to_jiffies(const unsigned int u)
{
	if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
		return MAX_JIFFY_OFFSET;
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
	return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
	return u * (HZ / USEC_PER_SEC);
#else
	return (u * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC;
#endif
}
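
/*
 * Illustrative numbers (again assuming HZ = 100): msecs_to_jiffies() rounds
 * up to the next tick, so msecs_to_jiffies(1) = (1 + 9) / 10 = 1 jiffy and
 * msecs_to_jiffies(15) = 2 jiffies; a requested delay is never shortened by
 * the conversion.
 */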

/*
 * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
 * that a remainder subtract here would not do the right thing as the
 * resolution values don't fall on second boundaries.  I.e. the line:
 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
 *
 * Rather, we just shift the bits off the right.
 *
 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
 * value to a scaled second value.
 */
static __inline__ unsigned long
timespec_to_jiffies(const struct timespec *value)
{
	unsigned long sec = value->tv_sec;
	long nsec = value->tv_nsec + TICK_NSEC - 1;

	if (sec >= MAX_SEC_IN_JIFFIES){
		sec = MAX_SEC_IN_JIFFIES;
		nsec = 0;
	}
	return (((u64)sec * SEC_CONVERSION) +
		(((u64)nsec * NSEC_CONVERSION) >>
		 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;

}
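
/*
 * Illustrative behaviour (with the HZ = 100 numbers used above, i.e.
 * TICK_NSEC = 10000000): the "+ TICK_NSEC - 1" rounds the nanosecond part
 * up, so a timespec of { 0, 1 } already converts to 1 jiffy while { 0, 0 }
 * converts to 0.
 */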

static __inline__ void
jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
{
	/*
	 * Convert jiffies to nanoseconds and separate with
	 * one divide.
	 */
	u64 nsec = (u64)jiffies * TICK_NSEC;
	value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
}

/* Same for "timeval"
 *
 * Well, almost.  The problem here is that the real system resolution is
 * in nanoseconds and the value being converted is in microseconds.
 * Also for some machines (those that use HZ = 1024, in particular),
 * there is a LARGE error in the tick size in microseconds.
 *
 * The solution we use is to do the rounding AFTER we convert the
 * microsecond part.  Thus the USEC_ROUND, the bits to be shifted off.
 * Instruction-wise, this should cost only an additional add-with-carry
 * instruction over the way it was done above.
 */
static __inline__ unsigned long
timeval_to_jiffies(const struct timeval *value)
{
	unsigned long sec = value->tv_sec;
	long usec = value->tv_usec;

	if (sec >= MAX_SEC_IN_JIFFIES){
		sec = MAX_SEC_IN_JIFFIES;
		usec = 0;
	}
	return (((u64)sec * SEC_CONVERSION) +
		(((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
		 (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}

static __inline__ void
jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
{
	/*
	 * Convert jiffies to nanoseconds and separate with
	 * one divide.
	 */
	u64 nsec = (u64)jiffies * TICK_NSEC;
	long tv_usec;

	value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tv_usec);
	tv_usec /= NSEC_PER_USEC;
	value->tv_usec = tv_usec;
}

/*
 * Convert jiffies/jiffies_64 to clock_t and back.
 */
static inline clock_t jiffies_to_clock_t(long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
	return x / (HZ / USER_HZ);
#else
	u64 tmp = (u64)x * TICK_NSEC;
	do_div(tmp, (NSEC_PER_SEC / USER_HZ));
	return (long)tmp;
#endif
}
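
/*
 * Illustrative case: with HZ == USER_HZ == 100 (TICK_NSEC = 10000000 as
 * above), TICK_NSEC divides NSEC_PER_SEC / USER_HZ evenly, the first branch
 * is used and jiffies_to_clock_t() is simply x / 1, i.e. the identity.
 */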

static inline unsigned long clock_t_to_jiffies(unsigned long x)
{
#if (HZ % USER_HZ)==0
	if (x >= ~0UL / (HZ / USER_HZ))
		return ~0UL;
	return x * (HZ / USER_HZ);
#else
	u64 jif;

	/* Don't worry about loss of precision here .. */
	if (x >= ~0UL / HZ * USER_HZ)
		return ~0UL;

	/* .. but do try to contain it here */
	jif = x * (u64) HZ;
	do_div(jif, USER_HZ);
	return jif;
#endif
}

static inline u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
	do_div(x, HZ / USER_HZ);
#else
	/*
	 * There are better ways that don't overflow early,
	 * but even this doesn't overflow in hundreds of years
	 * in 64 bits, so..
	 */
	x *= TICK_NSEC;
	do_div(x, (NSEC_PER_SEC / USER_HZ));
#endif
	return x;
}

static inline u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
	do_div(x, (NSEC_PER_SEC / USER_HZ));
#elif (USER_HZ % 512) == 0
	x *= USER_HZ/512;
	do_div(x, (NSEC_PER_SEC / 512));
#else
	/*
	 * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
	 * overflow after 64.99 years.
	 * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
	 */
	x *= 9;
	do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ/2))
	                          / USER_HZ));
#endif
	return x;
}

#endif