/* asm-x86 SMP support declarations (32-bit) */
      1 #ifndef __ASM_SMP_H
      2 #define __ASM_SMP_H
      3 
      4 /*
      5  * We need the APIC definitions automatically as part of 'smp.h'
      6  */
      7 #ifndef __ASSEMBLY__
      8 #include <linux/kernel.h>
      9 #include <linux/threads.h>
     10 #include <linux/cpumask.h>
     11 #endif
     12 
     13 #if defined(CONFIG_X86_LOCAL_APIC) && !defined(__ASSEMBLY__)
     14 #include <linux/bitops.h>
     15 #include <asm/mpspec.h>
     16 #include <asm/apic.h>
     17 #ifdef CONFIG_X86_IO_APIC
     18 #include <asm/io_apic.h>
     19 #endif
     20 #endif
     21 
/* 0xFF is used as an "invalid" marker — APIC IDs are stored as u8 here */
#define BAD_APICID 0xFFu
     23 #ifdef CONFIG_SMP
     24 #ifndef __ASSEMBLY__
     25 
/*
 * Private routines/data shared between the SMP boot and IPI code.
 */

extern void smp_alloc_memory(void);
extern int pic_mode;
/* number of sibling (hyper-)threads per physical package */
extern int smp_num_siblings;
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map);

extern void (*mtrr_hook) (void);
extern void zap_low_mappings (void);
extern void lock_ipi_call_lock(void);
extern void unlock_ipi_call_lock(void);

/* APIC IDs fit in a u8 (see BAD_APICID), so 256 covers the whole range */
#define MAX_APICID 256
extern u8 __initdata x86_cpu_to_apicid_init[];
extern void *x86_cpu_to_apicid_ptr;
/* per-CPU cache of each CPU's local APIC ID */
DECLARE_PER_CPU(u8, x86_cpu_to_apicid);

#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)

extern void set_cpu_sibling_map(int cpu);

#ifdef CONFIG_HOTPLUG_CPU
extern void cpu_exit_clear(void);
extern void cpu_uninit(void);
extern void remove_siblinginfo(int cpu);
#endif
     55 
/*
 * Table of hooks implementing the SMP bring-up and cross-CPU signalling
 * operations.  The native_* prototypes below are the bare-metal
 * implementations.  NOTE(review): presumably alternative implementations
 * (e.g. under CONFIG_PARAVIRT) install their own hooks — confirm against
 * the paravirt setup code.
 */
struct smp_ops
{
	/* boot-time CPU bring-up hooks */
	void (*smp_prepare_boot_cpu)(void);
	void (*smp_prepare_cpus)(unsigned max_cpus);
	int (*cpu_up)(unsigned cpu);
	void (*smp_cpus_done)(unsigned max_cpus);

	/* runtime cross-CPU signalling hooks */
	void (*smp_send_stop)(void);
	void (*smp_send_reschedule)(int cpu);
	int (*smp_call_function_mask)(cpumask_t mask,
				      void (*func)(void *info), void *info,
				      int wait);
};

/* the active hook table; the inline wrappers below dispatch through it */
extern struct smp_ops smp_ops;
     71 
     72 static inline void smp_prepare_boot_cpu(void)
     73 {
     74 	smp_ops.smp_prepare_boot_cpu();
     75 }
     76 static inline void smp_prepare_cpus(unsigned int max_cpus)
     77 {
     78 	smp_ops.smp_prepare_cpus(max_cpus);
     79 }
     80 static inline int __cpu_up(unsigned int cpu)
     81 {
     82 	return smp_ops.cpu_up(cpu);
     83 }
     84 static inline void smp_cpus_done(unsigned int max_cpus)
     85 {
     86 	smp_ops.smp_cpus_done(max_cpus);
     87 }
     88 
     89 static inline void smp_send_stop(void)
     90 {
     91 	smp_ops.smp_send_stop();
     92 }
     93 static inline void smp_send_reschedule(int cpu)
     94 {
     95 	smp_ops.smp_send_reschedule(cpu);
     96 }
     97 static inline int smp_call_function_mask(cpumask_t mask,
     98 					 void (*func) (void *info), void *info,
     99 					 int wait)
    100 {
    101 	return smp_ops.smp_call_function_mask(mask, func, info, wait);
    102 }
    103 
/* Bare-metal implementations of the smp_ops hooks. */
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
int native_cpu_up(unsigned int cpunum);
void native_smp_cpus_done(unsigned int max_cpus);

#ifndef CONFIG_PARAVIRT
/* No-op on bare metal; presumably CONFIG_PARAVIRT supplies a real hook
 * around the startup IPI sequence — defined elsewhere, confirm there. */
#define startup_ipi_hook(phys_apicid, start_eip, start_esp) 		\
do { } while (0)
#endif
    113 
/*
 * raw_smp_processor_id() is needed by all SMP systems.  It must _always_
 * be valid from the initial startup.  We map APIC_BASE very early in
 * page_setup(), so this is correct in the x86 case.
 */
DECLARE_PER_CPU(int, cpu_number);
#define raw_smp_processor_id() (x86_read_percpu(cpu_number))

/* NOTE(review): callout/callin appear to track the boot handshake between
 * boot CPU and secondaries (see num_booting_cpus below) — confirm in the
 * boot code. */
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_callin_map;
extern cpumask_t cpu_possible_map;
    126 /* We don't mark CPUs online until __cpu_up(), so we need another measure */
    127 static inline int num_booting_cpus(void)
    128 {
    129 	return cpus_weight(cpu_callout_map);
    130 }
    131 
extern int safe_smp_processor_id(void);
/* CPU hotplug teardown entry points */
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
/* total processors detected at boot */
extern unsigned int num_processors;

void __cpuinit smp_store_cpu_info(int id);
    138 
    139 #endif /* !__ASSEMBLY__ */
    140 
#else /* CONFIG_SMP */

/* Uniprocessor fallbacks: only the boot CPU exists. */
#define safe_smp_processor_id()		0
#define cpu_physical_id(cpu)		boot_cpu_physical_apicid

#define NO_PROC_ID		0xFF		/* No processor magic marker */

#endif /* CONFIG_SMP */
    149 
    150 #ifndef __ASSEMBLY__
    151 
    152 #ifdef CONFIG_X86_LOCAL_APIC
    153 
    154 #ifdef APIC_DEFINITION
    155 extern int hard_smp_processor_id(void);
    156 #else
    157 #include <mach_apicdef.h>
    158 static inline int hard_smp_processor_id(void)
    159 {
    160 	/* we don't want to mark this access volatile - bad code generation */
    161 	return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
    162 }
    163 #endif /* APIC_DEFINITION */
    164 
    165 #else /* CONFIG_X86_LOCAL_APIC */
    166 
    167 #ifndef CONFIG_SMP
    168 #define hard_smp_processor_id()		0
    169 #endif
    170 
    171 #endif /* CONFIG_X86_LOCAL_APIC */
    172 
/* presumably maps APIC ID -> NUMA node — confirm against arch NUMA setup */
extern u8 apicid_2_node[];
    174 
    175 #ifdef CONFIG_X86_LOCAL_APIC
    176 static __inline int logical_smp_processor_id(void)
    177 {
    178 	/* we don't want to mark this access volatile - bad code generation */
    179 	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
    180 }
    181 #endif
    182 #endif
    183 
    184 #endif
    185