#include <linux/cpumask.h>
#include <linux/interrupt.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/module.h>

#include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/proto.h>
#include <asm/ipi.h>

void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
					   int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}

#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
					    int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
					      int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}
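
/*
 * Flat logical destination mode encodes the target set as an 8-bit
 * bitmask in the APIC destination field, one bit per CPU. That lets
 * default_send_IPI_mask_logical() below reach every CPU in the mask
 * with a single IPI, but also limits it to at most 8 CPUs -- hence
 * the "smaller machines" restriction.
 */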
/*
 * This is only used on smaller machines.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_allbutself(int vector)
{
	/*
	 * If there are no other CPUs in the system, a broadcast would
	 * trigger an APIC send error, so avoid sending IPIs in that case.
	 */
	if (num_online_cpus() < 2)
		return;

	__default_local_send_IPI_allbutself(vector);
}

void default_send_IPI_all(int vector)
{
	__default_local_send_IPI_all(vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
}

/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

/*
 * Determine the current CPU number from the hardware APIC ID, falling
 * back to CPU 0 when no APIC is available or no mapping is found.
 */
int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!cpu_has_apic)
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif
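
/*
 * Usage sketch: an APIC driver selects these helpers through its
 * struct apic callbacks. For the default 32-bit driver in probe_32.c
 * the wiring looks roughly like this (abridged; field names are taken
 * from struct apic, and the exact initializer may differ by kernel
 * version):
 *
 *	static struct apic apic_default = {
 *		...
 *		.send_IPI_mask			= default_send_IPI_mask_logical,
 *		.send_IPI_mask_allbutself	= default_send_IPI_mask_allbutself_logical,
 *		.send_IPI_allbutself		= default_send_IPI_allbutself,
 *		.send_IPI_all			= default_send_IPI_all,
 *		.send_IPI_self			= default_send_IPI_self,
 *		...
 *	};
 */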