#ifndef __ASM_X86_MSR_H_
#define __ASM_X86_MSR_H_

#include <asm/msr-index.h>

#ifndef __ASSEMBLY__
# include <linux/types.h>
#endif

#ifdef __i386__

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <asm/errno.h>

static inline unsigned long long native_read_msr(unsigned int msr)
{
	unsigned long long val;

	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
	return val;
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	unsigned long long val;

	asm volatile("2: rdmsr ; xorl %0,%0\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3:  movl %3,%0 ; jmp 1b\n\t"
		     ".previous\n\t"
		     ".section __ex_table,\"a\"\n"
		     "   .align 4\n\t"
		     "   .long	2b,3b\n\t"
		     ".previous"
		     : "=r" (*err), "=A" (val)
		     : "c" (msr), "i" (-EFAULT));

	return val;
}
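
/*
 * How the *_safe variants recover from a faulting RDMSR/WRMSR: the
 * instruction at the "2:" label gets an entry in the __ex_table
 * section pairing its address with the fixup code at "3:".  When the
 * access faults (e.g. #GP for a non-existent MSR), the kernel's trap
 * handler looks the faulting address up in __ex_table and resumes at
 * "3:", which loads -EFAULT into the error output and jumps back to
 * the normal exit path at "1:".
 */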

static inline void native_write_msr(unsigned int msr, unsigned long long val)
{
	asm volatile("wrmsr" : : "c" (msr), "A"(val));
}

static inline int native_write_msr_safe(unsigned int msr,
					unsigned long long val)
{
	int err;
	asm volatile("2: wrmsr ; xorl %0,%0\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3:  movl %4,%0 ; jmp 1b\n\t"
		     ".previous\n\t"
		     ".section __ex_table,\"a\"\n"
		     "   .align 4\n\t"
		     "   .long	2b,3b\n\t"
		     ".previous"
		     : "=a" (err)
		     : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
		       "i" (-EFAULT));
	return err;
}

static inline unsigned long long native_read_tsc(void)
{
	unsigned long long val;
	asm volatile("rdtsc" : "=A" (val));
	return val;
}

static inline unsigned long long native_read_pmc(int counter)
{
	unsigned long long val;
	/* rdpmc reads the performance counter selected by ecx */
	asm volatile("rdpmc" : "=A" (val) : "c" (counter));
	return val;
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better
 * only).  Note: the rd* operations modify the parameters directly
 * (without using pointer indirection); this lets gcc optimize better.
 */

#define rdmsr(msr,val1,val2)						\
	do {								\
		u64 __val = native_read_msr(msr);			\
		(val1) = (u32)__val;					\
		(val2) = (u32)(__val >> 32);				\
	} while(0)

static inline void wrmsr(u32 __msr, u32 __low, u32 __high)
{
	native_write_msr(__msr, ((u64)__high << 32) | __low);
}

#define rdmsrl(msr,val)							\
	((val) = native_read_msr(msr))

#define wrmsrl(msr,val)	native_write_msr(msr, val)
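
/*
 * Minimal usage sketch (hypothetical caller, not part of this header;
 * MSR_EXAMPLE is a placeholder, not a real register):
 *
 *	u32 lo, hi;
 *	u64 val;
 *
 *	rdmsr(MSR_EXAMPLE, lo, hi);	// split 64-bit read
 *	lo |= 1;			// set bit 0 of the low half
 *	wrmsr(MSR_EXAMPLE, lo, hi);	// write it back
 *
 *	rdmsrl(MSR_EXAMPLE, val);	// same read, one 64-bit value
 *	wrmsrl(MSR_EXAMPLE, val);
 */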

/* wrmsr with exception handling */
static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high)
{
	return native_write_msr_safe(__msr, ((u64)__high << 32) | __low);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr,p1,p2)						\
	({								\
		int __err;						\
		u64 __val = native_read_msr_safe(msr, &__err);		\
		(*p1) = (u32)__val;					\
		(*p2) = (u32)(__val >> 32);				\
		__err;							\
	})
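
/*
 * Sketch of probing an MSR that may not exist (hypothetical caller;
 * MSR_MAYBE is a placeholder).  The _safe variants return 0 on
 * success and -EFAULT if the access faulted, instead of oopsing:
 *
 *	u32 lo, hi;
 *
 *	if (rdmsr_safe(MSR_MAYBE, &lo, &hi))
 *		printk(KERN_INFO "MSR not readable\n");
 *	else if (wrmsr_safe(MSR_MAYBE, lo, hi))
 *		printk(KERN_INFO "MSR not writable\n");
 */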

#define rdtscl(low)						\
	((low) = (u32)native_read_tsc())

#define rdtscll(val)						\
	((val) = native_read_tsc())

/* 0x10 is MSR_IA32_TSC, the time-stamp counter itself */
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)

#define rdpmc(counter,low,high)					\
	do {							\
		u64 _l = native_read_pmc(counter);		\
		(low)  = (u32)_l;				\
		(high) = (u32)(_l >> 32);			\
	} while(0)
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_SMP
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
#else  /*  CONFIG_SMP  */
static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
}
static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
#endif  /*  CONFIG_SMP  */
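
/*
 * The *_on_cpu helpers perform the access on a specific CPU (on SMP
 * via a cross-CPU function call; on UP they degenerate to the plain
 * accessors above).  Sketch, with a placeholder MSR number:
 *
 *	u32 lo, hi;
 *
 *	rdmsr_on_cpu(1, MSR_EXAMPLE, &lo, &hi);	// read on CPU 1
 *	wrmsr_on_cpu(1, MSR_EXAMPLE, lo, hi);	// write it back there
 */
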
#endif  /* ! __ASSEMBLY__ */
#endif  /* __KERNEL__ */

#else   /* __i386__ */

#ifndef __ASSEMBLY__
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better
 * only).  Note: the rd* operations modify the parameters directly
 * (without using pointer indirection); this lets gcc optimize better.
 */

#define rdmsr(msr,val1,val2) \
       __asm__ __volatile__("rdmsr" \
			    : "=a" (val1), "=d" (val2) \
			    : "c" (msr))


#define rdmsrl(msr,val) do { unsigned long a__,b__; \
       __asm__ __volatile__("rdmsr" \
			    : "=a" (a__), "=d" (b__) \
			    : "c" (msr)); \
       val = a__ | (b__<<32); \
} while(0)

#define wrmsr(msr,val1,val2) \
     __asm__ __volatile__("wrmsr" \
			  : /* no outputs */ \
			  : "c" (msr), "a" (val1), "d" (val2))

#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
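
/*
 * Usage sketch (hypothetical caller; MSR_EXAMPLE is a placeholder):
 * the same split low/high interface as on i386, plus rdmsrl/wrmsrl
 * for whole 64-bit values:
 *
 *	unsigned long val;
 *
 *	rdmsrl(MSR_EXAMPLE, val);
 *	wrmsrl(MSR_EXAMPLE, val | 1UL);	// set bit 0 and write back
 */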

#define rdtsc(low,high) \
     __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

#define rdtscl(low) \
     __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")

/* 0x0f,0x01,0xf9 is the rdtscp opcode, spelled in bytes for old assemblers */
#define rdtscp(low,high,aux) \
     __asm__ __volatile__ (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))

#define rdtscll(val) do { \
     unsigned int __a,__d; \
     __asm__ __volatile__("rdtsc" : "=a" (__a), "=d" (__d)); \
     (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
} while(0)

#define rdtscpll(val, aux) do { \
     unsigned long __a, __d; \
     __asm__ __volatile__ (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
     (val) = (__d << 32) | __a; \
} while (0)
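
/*
 * Cycle-counting sketch (hypothetical caller; do_work() is a
 * placeholder).  Note that rdtsc is not a serializing instruction,
 * so careful measurement code brackets it with a serializing
 * instruction such as cpuid:
 *
 *	unsigned long t0, t1;
 *
 *	rdtscll(t0);
 *	do_work();			// measured code
 *	rdtscll(t1);
 *	printk(KERN_DEBUG "cycles: %lu\n", t1 - t0);
 */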

/* 0x10 is MSR_IA32_TSC, the time-stamp counter itself */
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)

/* 0xc0000103 is MSR_TSC_AUX, the value rdtscp returns in ecx */
#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)

#define rdpmc(counter,low,high) \
     __asm__ __volatile__("rdpmc" \
			  : "=a" (low), "=d" (high) \
			  : "c" (counter))
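
/*
 * rdpmc reads the performance-monitoring counter selected by ecx; it
 * faults at CPL > 0 unless CR4.PCE is set.  Sketch for counter 0:
 *
 *	unsigned int lo, hi;
 *
 *	rdpmc(0, lo, hi);
 */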


static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op));
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
			       int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op), "c" (count));
}
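
/*
 * Query sketch (hypothetical caller): leaf 0 returns the highest
 * standard leaf in eax and the vendor string in ebx, edx, ecx; for
 * leaves with sub-leaves (e.g. leaf 4, cache parameters) the index
 * goes in ecx via cpuid_count():
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	int a, b, c, d;
 *
 *	cpuid(0, &eax, &ebx, &ecx, &edx);
 *	cpuid_count(4, 0, &a, &b, &c, &d);	// sub-leaf 0
 */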

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax;

	__asm__("cpuid"
		: "=a" (eax)
		: "0" (op)
		: "bx", "cx", "dx");
	return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx;

	__asm__("cpuid"
		: "=a" (eax), "=b" (ebx)
		: "0" (op)
		: "cx", "dx" );
	return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ecx;

	__asm__("cpuid"
		: "=a" (eax), "=c" (ecx)
		: "0" (op)
		: "bx", "dx" );
	return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, edx;

	__asm__("cpuid"
		: "=a" (eax), "=d" (edx)
		: "0" (op)
		: "bx", "cx");
	return edx;
}
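
/*
 * Single-datum sketch: CPUID leaf 1, edx bit 4 is the TSC feature
 * flag, so a caller could test for a time-stamp counter with:
 *
 *	if (cpuid_edx(1) & (1 << 4))
 *		printk(KERN_DEBUG "CPU has a TSC\n");
 */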

#ifdef __KERNEL__

/* wrmsr with exception handling */
#define wrmsr_safe(msr,a,b) ({ int ret__;			\
	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
		     "1:\n\t"					\
		     ".section .fixup,\"ax\"\n\t"		\
		     "3:  movl %4,%0 ; jmp 1b\n\t"		\
		     ".previous\n\t"				\
		     ".section __ex_table,\"a\"\n"		\
		     "   .align 8\n\t"				\
		     "   .quad	2b,3b\n\t"			\
		     ".previous"				\
		     : "=a" (ret__)				\
		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
	ret__; })

#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))

#define rdmsr_safe(msr,a,b) \
	({ int ret__;						\
	  asm volatile ("1:       rdmsr\n"			\
			"2:\n"					\
			".section .fixup,\"ax\"\n"		\
			"3:       movl %4,%0\n"			\
			" jmp 2b\n"				\
			".previous\n"				\
			".section __ex_table,\"a\"\n"		\
			" .align 8\n"				\
			" .quad 1b,3b\n"				\
			".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
			:"c"(msr), "i"(-EIO), "0"(0));			\
	  ret__; })
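
/*
 * Sketch of tolerant MSR access (hypothetical caller; MSR_MAYBE is a
 * placeholder).  rdmsr_safe()/wrmsr_safe() return 0 on success and a
 * negative error (-EIO / -EFAULT respectively) if the access faulted:
 *
 *	u32 lo, hi;
 *
 *	if (rdmsr_safe(MSR_MAYBE, &lo, &hi) == 0)
 *		checking_wrmsrl(MSR_MAYBE, ((u64)hi << 32) | lo);
 */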

#ifdef CONFIG_SMP
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
#else  /*  CONFIG_SMP  */
static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
}
static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
#endif  /* CONFIG_SMP */
#endif  /* __KERNEL__ */
#endif  /* __ASSEMBLY__ */

#endif  /* !__i386__ */

#endif	/* __ASM_X86_MSR_H_ */