/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/percpu.h"
#include "asm/pgalloc.h"
#include "asm/tlb.h"

/* For some reason, mmu_gathers are referenced when CONFIG_SMP is off. */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

#ifdef CONFIG_SMP

#include "linux/sched.h"
#include "linux/module.h"
#include "linux/threads.h"
#include "linux/interrupt.h"
#include "linux/err.h"
#include "linux/hardirq.h"
#include "asm/smp.h"
#include "asm/processor.h"
#include "asm/spinlock.h"
#include "kern_util.h"
#include "kern.h"
#include "irq_user.h"
#include "os.h"

/* CPU online map, set by smp_boot_cpus */
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_possible_map = CPU_MASK_NONE;

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);

/* Per CPU bogomips and other parameters
 * The only piece used here is the ipi pipe, which is set before SMP is
 * started and never changed.
 */
struct cpuinfo_um cpu_data[NR_CPUS];

/* A statistic, can be a little off */
int num_reschedules_sent = 0;

/* Not changed after boot */
struct task_struct *idle_threads[NR_CPUS];

void smp_send_reschedule(int cpu)
{
	os_write_file(cpu_data[cpu].ipi_pipe[1], "R", 1);
	num_reschedules_sent++;
}

void smp_send_stop(void)
{
	int i;

	printk(KERN_INFO "Stopping all CPUs...");
	for (i = 0; i < num_online_cpus(); i++) {
		if (i == current_thread->cpu)
			continue;
		os_write_file(cpu_data[i].ipi_pipe[1], "S", 1);
	}
	printk(KERN_INFO "done\n");
}

static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
static cpumask_t cpu_callin_map = CPU_MASK_NONE;

static int idle_proc(void *cpup)
{
	int cpu = (int) cpup, err;

	err = os_pipe(cpu_data[cpu].ipi_pipe, 1, 1);
	if (err < 0)
		panic("CPU#%d failed to create IPI pipe, err = %d", cpu, -err);

	os_set_fd_async(cpu_data[cpu].ipi_pipe[0],
			current->thread.mode.tt.extern_pid);

	wmb();
	if (cpu_test_and_set(cpu, cpu_callin_map)) {
		printk(KERN_ERR "huh, CPU#%d already present??\n", cpu);
		BUG();
	}

	while (!cpu_isset(cpu, smp_commenced_mask))
		cpu_relax();

	cpu_set(cpu, cpu_online_map);
	default_idle();
	return 0;
}

static struct task_struct *idle_thread(int cpu)
{
	struct task_struct *new_task;

	current->thread.request.u.thread.proc = idle_proc;
	current->thread.request.u.thread.arg = (void *) cpu;
	new_task = fork_idle(cpu);
	if (IS_ERR(new_task))
		panic("copy_process failed in idle_thread, error = %ld",
		      PTR_ERR(new_task));

	cpu_tasks[cpu] = ((struct cpu_task)
			  { .pid = new_task->thread.mode.tt.extern_pid,
			    .task = new_task });
	idle_threads[cpu] = new_task;
	panic("skas mode doesn't support SMP");
	return new_task;
}

void smp_prepare_cpus(unsigned int maxcpus)
{
	struct task_struct *idle;
	unsigned long waittime;
	int err, cpu, me = smp_processor_id();
	int i;

	for (i = 0; i < ncpus; ++i)
		cpu_set(i, cpu_possible_map);

	cpu_clear(me, cpu_online_map);
	cpu_set(me, cpu_online_map);
	cpu_set(me, cpu_callin_map);

	err = os_pipe(cpu_data[me].ipi_pipe, 1, 1);
	if (err < 0)
		panic("CPU#0 failed to create IPI pipe, errno = %d", -err);

	os_set_fd_async(cpu_data[me].ipi_pipe[0],
			current->thread.mode.tt.extern_pid);

	for (cpu = 1; cpu < ncpus; cpu++) {
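		/*
		 * Boot handshake: fork an idle thread that runs idle_proc(),
		 * then busy-wait until the new CPU checks in through
		 * cpu_callin_map (or the wait budget runs out).  The CPU
		 * only enters cpu_online_map later, in idle_proc(), once
		 * __cpu_up() has set its bit in smp_commenced_mask.
		 */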
		printk(KERN_INFO "Booting processor %d...\n", cpu);

		idle = idle_thread(cpu);

		init_idle(idle, cpu);

		waittime = 200000000;
		while (waittime-- && !cpu_isset(cpu, cpu_callin_map))
			cpu_relax();

		if (cpu_isset(cpu, cpu_callin_map))
			printk(KERN_INFO "done\n");
		else
			printk(KERN_INFO "failed\n");
	}
}

void smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
}

int __cpu_up(unsigned int cpu)
{
	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_isset(cpu, cpu_online_map))
		mb();
	return 0;
}

int setup_profiling_timer(unsigned int multiplier)
{
	printk(KERN_INFO "setup_profiling_timer\n");
	return 0;
}

void smp_call_function_slave(int cpu);

void IPI_handler(int cpu)
{
	unsigned char c;
	int fd;

	fd = cpu_data[cpu].ipi_pipe[0];
	while (os_read_file(fd, &c, 1) == 1) {
		switch (c) {
		case 'C':
			smp_call_function_slave(cpu);
			break;

		case 'R':
			set_tsk_need_resched(current);
			break;

		case 'S':
			printk(KERN_INFO "CPU#%d stopping\n", cpu);
			while (1)
				pause();
			break;

		default:
			printk(KERN_ERR "CPU#%d received unknown IPI [%c]!\n",
			       cpu, c);
			break;
		}
	}
}

int hard_smp_processor_id(void)
{
	return pid_to_processor_id(os_getpid());
}

static DEFINE_SPINLOCK(call_lock);
static atomic_t scf_started;
static atomic_t scf_finished;
static void (*func)(void *info);
static void *info;

void smp_call_function_slave(int cpu)
{
	atomic_inc(&scf_started);
	(*func)(info);
	atomic_inc(&scf_finished);
}

int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic,
		      int wait)
{
	int cpus = num_online_cpus() - 1;
	int i;

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	spin_lock_bh(&call_lock);
	atomic_set(&scf_started, 0);
	atomic_set(&scf_finished, 0);
	func = _func;
	info = _info;

	for_each_online_cpu(i)
		os_write_file(cpu_data[i].ipi_pipe[1], "C", 1);

	while (atomic_read(&scf_started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&scf_finished) != cpus)
			barrier();

	spin_unlock_bh(&call_lock);
	return 0;
}

#endif /* CONFIG_SMP */
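/*
 * Illustrative sketch, not part of the original file: one way a caller
 * might use smp_call_function() above.  ipi_ping() and ping_other_cpus()
 * are hypothetical names, shown only to make the 'C' IPI path concrete:
 * each remote CPU reads 'C' from its ipi_pipe in IPI_handler(), runs the
 * registered function via smp_call_function_slave(), and the caller spins
 * on the scf_started/scf_finished counters until every other online CPU
 * has answered.  Wrapped in #if 0 so it stays out of the build.
 */
#if 0
static atomic_t pings = ATOMIC_INIT(0);

static void ipi_ping(void *unused)
{
	atomic_inc(&pings);
}

static void ping_other_cpus(void)
{
	/* wait == 1: block until every other CPU has run ipi_ping(). */
	smp_call_function(ipi_ping, NULL, 0, 1);
	printk(KERN_INFO "%d CPUs answered the ping\n", atomic_read(&pings));
}
#endif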