/* include/asm-x86: x86-64 per-CPU data area (PDA) definitions */
      1 #ifndef X86_64_PDA_H
      2 #define X86_64_PDA_H
      3 
      4 #ifndef __ASSEMBLY__
      5 #include <linux/stddef.h>
      6 #include <linux/types.h>
      7 #include <linux/cache.h>
      8 #include <asm/page.h>
      9 
     10 /* Per processor datastructure. %gs points to it while the kernel runs */
/*
 * Per-processor data area.  While the kernel runs, %gs points at the
 * current CPU's instance, so members are reached as %gs:offset by the
 * pda_* accessor macros below.
 *
 * The numeric comments record byte offsets of the early members; the
 * layout is ABI-sensitive (see the stack canary note), so keep them
 * accurate when changing the structure.
 */
struct x8664_pda {
	struct task_struct *pcurrent;	/* 0  Current process */
	unsigned long data_offset;	/* 8 Per cpu data offset from linker
					   address */
	unsigned long kernelstack;  /* 16 top of kernel stack for current */
	unsigned long oldrsp; 	    /* 24 user rsp for system call */
        int irqcount;		    /* 32 Irq nesting counter. Starts with -1 */
	int cpunumber;		    /* 36 Logical CPU number */
#ifdef CONFIG_CC_STACKPROTECTOR
	unsigned long stack_canary;	/* 40 stack canary value */
					/* gcc-ABI: this canary MUST be at
					   offset 40!!! */
#endif
	char *irqstackptr;		/* per-CPU interrupt stack pointer */
	int nodenumber;		    /* number of current node */
	unsigned int __softirq_pending;	/* pending softirq bitmask */
	unsigned int __nmi_count;	/* number of NMI on this CPUs */
	short mmu_state;		/* per-CPU TLB state (see callers) */
	short isidle;			/* nonzero while this CPU is idle */
	struct mm_struct *active_mm;	/* mm currently active on this CPU */
	unsigned apic_timer_irqs;	/* interrupt statistics counters ... */
	unsigned irq0_irqs;
	unsigned irq_resched_count;
	unsigned irq_call_count;
	unsigned irq_tlb_count;
	unsigned irq_thermal_count;
	unsigned irq_threshold_count;
	unsigned irq_spurious_count;	/* ... end of per-IRQ-type counters */
} ____cacheline_aligned_in_smp;
     40 
/* Per-CPU PDA pointer table and the statically allocated boot-time PDAs. */
extern struct x8664_pda *_cpu_pda[];
extern struct x8664_pda boot_cpu_pda[];

/* Look up CPU i's PDA. */
#define cpu_pda(i) (_cpu_pda[i])

/*
 * There is no fast way to get the base address of the PDA, all the accesses
 * have to mention %fs/%gs.  So it needs to be done this Torvaldian way.
 */
/* Link-time error trap: referenced only for unsupported field sizes. */
extern void __bad_pda_field(void) __attribute__((noreturn));

/*
 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
 * all PDA accesses so it gets read/write dependencies right.
 */
extern struct x8664_pda _proxy_pda;

/* Byte offset of a PDA member, usable as an "i" asm operand. */
#define pda_offset(field) offsetof(struct x8664_pda, field)
     59 
     60 #define pda_to_op(op,field,val) do {		\
     61 	typedef typeof(_proxy_pda.field) T__;	\
     62 	if (0) { T__ tmp__; tmp__ = (val); }	/* type checking */ \
     63 	switch (sizeof(_proxy_pda.field)) {	\
     64 	case 2:					\
     65 		asm(op "w %1,%%gs:%c2" : 	\
     66 		    "+m" (_proxy_pda.field) :	\
     67 		    "ri" ((T__)val),		\
     68 		    "i"(pda_offset(field))); 	\
     69  		break;				\
     70 	case 4:					\
     71 		asm(op "l %1,%%gs:%c2" : 	\
     72 		    "+m" (_proxy_pda.field) :	\
     73 		    "ri" ((T__)val),		\
     74 		    "i" (pda_offset(field))); 	\
     75 		break;				\
     76 	case 8:					\
     77 		asm(op "q %1,%%gs:%c2": 	\
     78 		    "+m" (_proxy_pda.field) :	\
     79 		    "ri" ((T__)val),		\
     80 		    "i"(pda_offset(field))); 	\
     81 		break;				\
     82        default: 				\
     83 		__bad_pda_field();		\
     84        }					\
     85        } while (0)
     86 
/*
 * pda_from_op(op, field) - read the PDA member "field" of the current
 * CPU with instruction "op" (mov), via %gs, and yield its value.
 *
 * As in pda_to_op(), the "m" (_proxy_pda.field) operand only models
 * the dependency for gcc; the real load is %gs:offset.  The statement
 * expression evaluates to ret__, typed like the field itself.
 */
#define pda_from_op(op,field) ({		\
	typeof(_proxy_pda.field) ret__;		\
	switch (sizeof(_proxy_pda.field)) {	\
	case 2:					\
		asm(op "w %%gs:%c1,%0" : 	\
		    "=r" (ret__) :		\
		    "i" (pda_offset(field)), 	\
		    "m" (_proxy_pda.field)); 	\
		 break;				\
	case 4:					\
		asm(op "l %%gs:%c1,%0":		\
		    "=r" (ret__):		\
		    "i" (pda_offset(field)), 	\
		    "m" (_proxy_pda.field)); 	\
		 break;				\
	case 8:					\
		asm(op "q %%gs:%c1,%0":		\
		    "=r" (ret__) :		\
		    "i" (pda_offset(field)), 	\
		    "m" (_proxy_pda.field)); 	\
		 break;				\
	default: 				\
		__bad_pda_field();		\
	}					\
	ret__; })
    112 
/*
 * Convenience accessors for the current CPU's PDA.  None of these are
 * atomic against other CPUs; callers must ensure preemption is off if
 * the field can also be written remotely.
 */
#define read_pda(field) pda_from_op("mov",field)
#define write_pda(field,val) pda_to_op("mov",field,val)
#define add_pda(field,val) pda_to_op("add",field,val)
#define sub_pda(field,val) pda_to_op("sub",field,val)
#define or_pda(field,val) pda_to_op("or",field,val)
    118 
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
/*
 * Test and clear bit "bit" in PDA member "field"; evaluates to the old
 * bit value.  btr clears the bit and latches its old value in CF;
 * "sbbl %0,%0" then turns CF into 0 or -1 (all-ones) in old__, so the
 * result is nonzero iff the bit was set.
 */
#define test_and_clear_bit_pda(bit,field) ({		\
	int old__;						\
	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"		\
	    : "=r" (old__), "+m" (_proxy_pda.field) 		\
	    : "dIr" (bit), "i" (pda_offset(field)) : "memory");	\
	old__;							\
})
    127 
    128 #endif
    129 
/*
 * Kept outside the !__ASSEMBLY__ guard so assembly code can use it.
 * NOTE(review): 5*8 = 40 bytes; presumably a stack-relative offset used
 * by the entry code -- confirm against the asm users before changing.
 */
#define PDA_STACKOFFSET (5*8)
    131 
    132 #endif
    133