Home | History | Annotate | Download | only in asm-mips
      1 /*
      2  * This file is subject to the terms and conditions of the GNU General Public
      3  * License.  See the file "COPYING" in the main directory of this archive
      4  * for more details.
      5  *
      6  * Copyright (C) 1994 by Waldorf Electronics
      7  * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
      8  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
      9  * Copyright (C) 2007  Maciej W. Rozycki
     10  */
     11 #ifndef _ASM_DELAY_H
     12 #define _ASM_DELAY_H
     13 
     14 #include <linux/param.h>
     15 #include <linux/smp.h>
     16 
     17 #include <asm/compiler.h>
     18 #include <asm/war.h>
     19 
/*
 * Spin for @loops iterations of a two-instruction busy-wait loop.
 *
 * Each variant is the same tight loop: the conditional branch (bnez)
 * comes first and the decrement executes in its branch delay slot;
 * ".set noreorder" stops the assembler from reordering or filling the
 * delay slot itself.  ".align 3" places the loop on an 8-byte
 * boundary -- presumably so both instructions fall in the same fetch
 * line for stable timing (TODO confirm against the original commit).
 *
 * The variant is selected at compile time via sizeof(long), so dead
 * branches are optimized away:
 *  - 32-bit kernels: subu with an immediate operand.
 *  - 64-bit kernels, no DADDI workaround needed: dsubu immediate.
 *  - 64-bit kernels with DADDI_WAR (from <asm/war.h>): dsubu with the
 *    constant 1 passed in a register (%2, the "r" (1) input) instead
 *    of as an immediate.  NOTE(review): presumably this sidesteps a
 *    CPU erratum in the daddi/dsubu-immediate encoding; verify
 *    against asm/war.h.
 */
static inline void __delay(unsigned long loops)
{
	if (sizeof(long) == 4)
		__asm__ __volatile__ (
		"	.set	noreorder				\n"
		"	.align	3					\n"
		"1:	bnez	%0, 1b					\n"
		"	subu	%0, 1					\n"
		"	.set	reorder					\n"
		: "=r" (loops)
		: "0" (loops));
	else if (sizeof(long) == 8 && !DADDI_WAR)
		__asm__ __volatile__ (
		"	.set	noreorder				\n"
		"	.align	3					\n"
		"1:	bnez	%0, 1b					\n"
		"	dsubu	%0, 1					\n"
		"	.set	reorder					\n"
		: "=r" (loops)
		: "0" (loops));
	else if (sizeof(long) == 8 && DADDI_WAR)
		__asm__ __volatile__ (
		"	.set	noreorder				\n"
		"	.align	3					\n"
		"1:	bnez	%0, 1b					\n"
		"	dsubu	%0, %2					\n"
		"	.set	reorder					\n"
		: "=r" (loops)
		: "0" (loops), "r" (1));
}
     50 
     51 
     52 /*
     53  * Division by multiplication: you don't have to worry about
     54  * loss of precision.
     55  *
     56  * Use only for very small delays ( < 1 msec).  Should probably use a
     57  * lookup table, really, as the multiplications take much too long with
     58  * short delays.  This is a "reasonable" implementation, though (and the
 * first constant multiplication gets optimized away if the delay is
     60  * a constant)
     61  */
     62 
/*
 * Delay for approximately @usecs microseconds.
 *
 * @usecs: microseconds to spin for
 * @lpj:   loops-per-jiffy calibration value for the current CPU
 *
 * Converts microseconds to __delay() loop counts by fixed-point
 * arithmetic: usecs is first scaled by a precomputed constant of the
 * form 2**64 / (1000000 / HZ) (or its upper 32 bits on 32-bit
 * kernels), then multiplied by lpj, keeping only the HIGH half of the
 * double-width product -- which divides the 2**64 (or 2**32) scale
 * factor back out, leaving roughly usecs * lpj * HZ / 1000000 loops.
 */
static inline void __udelay(unsigned long usecs, unsigned long lpj)
{
	unsigned long hi, lo;

	/*
	 * The rate of HZ == 128 is rounded incorrectly by the catch-all
	 * 64-bit case below.  Excessive precision?  Probably ...
	 */
#if defined(CONFIG_64BIT) && (HZ == 128)
	usecs *= 0x0008637bd05af6c7UL;		/* 2**64 / (1000000 / HZ) */
#elif defined(CONFIG_64BIT)
	usecs *= (0x8000000000000000UL / (500000 / HZ));
#else /* 32-bit junk follows here */
	/* Upper 32 bits of the 64-bit constant, rounded to nearest. */
	usecs *= (unsigned long) (((0x8000000000000000ULL / (500000 / HZ)) +
	                           0x80000000ULL) >> 32);
#endif

	/*
	 * The "=h"/"=l" constraints tie the outputs to the MIPS hi/lo
	 * accumulator registers, so usecs receives the high half of the
	 * product directly; GCC_REG_ACCUM (from <asm/compiler.h>) names
	 * the clobbered accumulator for the compiler.  The R4000_WAR
	 * variant instead reads hi with an explicit mfhi -- NOTE(review):
	 * presumably an errata workaround; confirm against asm/war.h.
	 * Again, sizeof(long) selects the variant at compile time.
	 */
	if (sizeof(long) == 4)
		__asm__("multu\t%2, %3"
		: "=h" (usecs), "=l" (lo)
		: "r" (usecs), "r" (lpj)
		: GCC_REG_ACCUM);
	else if (sizeof(long) == 8 && !R4000_WAR)
		__asm__("dmultu\t%2, %3"
		: "=h" (usecs), "=l" (lo)
		: "r" (usecs), "r" (lpj)
		: GCC_REG_ACCUM);
	else if (sizeof(long) == 8 && R4000_WAR)
		__asm__("dmultu\t%3, %4\n\tmfhi\t%0"
		: "=r" (usecs), "=h" (hi), "=l" (lo)
		: "r" (usecs), "r" (lpj)
		: GCC_REG_ACCUM);

	__delay(usecs);
}
     98 
/* Per-CPU delay calibration value for the CPU we are running on. */
#define __udelay_val cpu_data[raw_smp_processor_id()].udelay_val

/* Busy-wait @usecs microseconds using the current CPU's calibration. */
#define udelay(usecs) __udelay((usecs), __udelay_val)

/* make sure "usecs *= ..." in udelay do not overflow. */
#if HZ >= 1000
#define MAX_UDELAY_MS	1
#elif HZ <= 200
#define MAX_UDELAY_MS	5
#else
#define MAX_UDELAY_MS	(1000 / HZ)
#endif
    111 
    112 #endif /* _ASM_DELAY_H */
    113