/* Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
 * GPL v2 and any later version.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/syscalls.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

/* Since we affect priority and affinity (both of which are visible
 * to, and settable by, outside processes) we do indirection via a
 * kthread. */

/* Thread to stop each CPU in user context. */
enum stopmachine_state {
	STOPMACHINE_WAIT,
	STOPMACHINE_PREPARE,
	STOPMACHINE_DISABLE_IRQ,
	STOPMACHINE_EXIT,
};

static enum stopmachine_state stopmachine_state;
static unsigned int stopmachine_num_threads;
static atomic_t stopmachine_thread_ack;
static DECLARE_MUTEX(stopmachine_mutex);

static int stopmachine(void *cpu)
{
	int irqs_disabled = 0;
	int prepared = 0;

	set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));

	/* Ack: we are alive */
	smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
	atomic_inc(&stopmachine_thread_ack);

	/* Simple state machine */
	while (stopmachine_state != STOPMACHINE_EXIT) {
		if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
		    && !irqs_disabled) {
			local_irq_disable();
			irqs_disabled = 1;
			/* Ack: irqs disabled. */
			smp_mb(); /* Must read state first. */
			atomic_inc(&stopmachine_thread_ack);
		} else if (stopmachine_state == STOPMACHINE_PREPARE
			   && !prepared) {
			/* Everyone is in place, hold CPU. */
			preempt_disable();
			prepared = 1;
			smp_mb(); /* Must read state first. */
			atomic_inc(&stopmachine_thread_ack);
		}
		/* Yield in first stage: migration threads need to
		 * help our sisters onto their CPUs. */
		if (!prepared && !irqs_disabled)
			yield();
		else
			cpu_relax();
	}

	/* Ack: we are exiting. */
	smp_mb(); /* Must read state first. */
	atomic_inc(&stopmachine_thread_ack);

	if (irqs_disabled)
		local_irq_enable();
	if (prepared)
		preempt_enable();

	return 0;
}

/* Change the thread state */
static void stopmachine_set_state(enum stopmachine_state state)
{
	atomic_set(&stopmachine_thread_ack, 0);
	smp_wmb();
	stopmachine_state = state;
	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
		cpu_relax();
}
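
/*
 * The set_state/ack handshake above is a small rendezvous protocol:
 * reset the counter, publish the new state, then spin until every
 * thread has acknowledged it.  The block below is an illustrative
 * user-space sketch of the same protocol in C11 atomics (not kernel
 * code, and all demo_* names are hypothetical); it shows why the
 * smp_wmb() matters: the counter reset must become visible before
 * the new state does, or a stale ack could satisfy the wait loop.
 */
#if 0	/* sketch only; never compiled as part of this file */
#include <stdatomic.h>

static atomic_int demo_state;
static atomic_int demo_acks;
static int demo_num_threads;

static void demo_set_state(int state)
{
	atomic_store(&demo_acks, 0);
	/* Release store: the reset of demo_acks is published first. */
	atomic_store_explicit(&demo_state, state, memory_order_release);
	while (atomic_load(&demo_acks) != demo_num_threads)
		;	/* spin, as stopmachine_set_state() does */
}

/* Each worker calls this only after observing and acting on a state. */
static void demo_ack(void)
{
	atomic_fetch_add(&demo_acks, 1);
}
#endif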

static int stop_machine(void)
{
	int i, ret = 0;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* One high-prio thread per cpu.  We'll do this one. */
	sched_setscheduler(current, SCHED_FIFO, &param);

	atomic_set(&stopmachine_thread_ack, 0);
	stopmachine_num_threads = 0;
	stopmachine_state = STOPMACHINE_WAIT;

	for_each_online_cpu(i) {
		if (i == raw_smp_processor_id())
			continue;
		ret = kernel_thread(stopmachine, (void *)(long)i, CLONE_KERNEL);
		if (ret < 0)
			break;
		stopmachine_num_threads++;
	}

	/* Wait for them all to come to life. */
	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
		yield();

	/* If some failed, kill them all. */
	if (ret < 0) {
		stopmachine_set_state(STOPMACHINE_EXIT);
		return ret;
	}

	/* Now they are all started, make them hold the CPUs, ready. */
	preempt_disable();
	stopmachine_set_state(STOPMACHINE_PREPARE);

	/* Make them disable irqs. */
	local_irq_disable();
	stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);

	return 0;
}

static void restart_machine(void)
{
	stopmachine_set_state(STOPMACHINE_EXIT);
	local_irq_enable();
	preempt_enable_no_resched();
}

struct stop_machine_data {
	int (*fn)(void *);
	void *data;
	struct completion done;
};

static int do_stop(void *_smdata)
{
	struct stop_machine_data *smdata = _smdata;
	int ret;

	ret = stop_machine();
	if (ret == 0) {
		ret = smdata->fn(smdata->data);
		restart_machine();
	}

	/* We're done: you can kthread_stop us now */
	complete(&smdata->done);

	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return ret;
}

struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
				       unsigned int cpu)
{
	struct stop_machine_data smdata;
	struct task_struct *p;

	smdata.fn = fn;
	smdata.data = data;
	init_completion(&smdata.done);

	down(&stopmachine_mutex);

	/* If they don't care which CPU fn runs on, bind to any online one. */
	if (cpu == NR_CPUS)
		cpu = raw_smp_processor_id();

	p = kthread_create(do_stop, &smdata, "kstopmachine");
	if (!IS_ERR(p)) {
		kthread_bind(p, cpu);
		wake_up_process(p);
		wait_for_completion(&smdata.done);
	}
	up(&stopmachine_mutex);
	return p;
}

int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
{
	struct task_struct *p;
	int ret;

	/* No CPUs can come up or down during this. */
	lock_cpu_hotplug();
	p = __stop_machine_run(fn, data, cpu);
	if (!IS_ERR(p))
		ret = kthread_stop(p);
	else
		ret = PTR_ERR(p);
	unlock_cpu_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine_run);
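
/*
 * Example usage (an illustrative sketch, not part of the original
 * file): run a function while every other online CPU spins with
 * interrupts disabled, e.g. to change something no other CPU may
 * observe half-done.  example_patch(), example_caller() and the
 * STOP_MACHINE_EXAMPLE guard are hypothetical names.
 */
#ifdef STOP_MACHINE_EXAMPLE
static int example_patch(void *data)
{
	/* Runs on exactly one CPU; all others are held quiescent. */
	int *flag = data;
	*flag = 1;
	return 0;
}

static int example_caller(void)
{
	int flag = 0;

	/* NR_CPUS means the caller doesn't care which CPU runs fn:
	 * __stop_machine_run() then binds to the current CPU. */
	return stop_machine_run(example_patch, &flag, NR_CPUS);
}
#endif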