/* Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
 * GPL v2 and any later version.
 */
#include <linux/stop_machine.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/syscalls.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

/* Since we affect priority and affinity (both of which are visible
 * to, and settable by, outside processes) we do indirection via a
 * kthread. */

/* Thread to stop each CPU in user context. */
enum stopmachine_state {
        STOPMACHINE_WAIT,
        STOPMACHINE_PREPARE,
        STOPMACHINE_DISABLE_IRQ,
        STOPMACHINE_EXIT,
};

static enum stopmachine_state stopmachine_state;
static unsigned int stopmachine_num_threads;
static atomic_t stopmachine_thread_ack;
static DECLARE_MUTEX(stopmachine_mutex);

static int stopmachine(void *cpu)
{
        int irqs_disabled = 0;
        int prepared = 0;

        set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));

        /* Ack: we are alive */
        smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
        atomic_inc(&stopmachine_thread_ack);

        /* Simple state machine */
        while (stopmachine_state != STOPMACHINE_EXIT) {
                if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
                    && !irqs_disabled) {
                        local_irq_disable();
                        irqs_disabled = 1;
                        /* Ack: irqs disabled. */
                        smp_mb(); /* Must read state first. */
                        atomic_inc(&stopmachine_thread_ack);
                } else if (stopmachine_state == STOPMACHINE_PREPARE
                           && !prepared) {
                        /* Everyone is in place, hold CPU. */
                        preempt_disable();
                        prepared = 1;
                        smp_mb(); /* Must read state first. */
                        atomic_inc(&stopmachine_thread_ack);
                }
                /* Yield in first stage: migration threads need to
                 * help our sisters onto their CPUs. */
                if (!prepared && !irqs_disabled)
                        yield();
                else
                        cpu_relax();
        }

        /* Ack: we are exiting. */
        smp_mb(); /* Must read state first. */
        atomic_inc(&stopmachine_thread_ack);

        if (irqs_disabled)
                local_irq_enable();
        if (prepared)
                preempt_enable();

        return 0;
}

/* Change the thread state */
static void stopmachine_set_state(enum stopmachine_state state)
{
        atomic_set(&stopmachine_thread_ack, 0);
        smp_wmb();
        stopmachine_state = state;
        while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
                cpu_relax();
}

static int stop_machine(void)
{
        int i, ret = 0;
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        /* One high-prio thread per cpu.  We'll do this one. */
        sched_setscheduler(current, SCHED_FIFO, &param);

        atomic_set(&stopmachine_thread_ack, 0);
        stopmachine_num_threads = 0;
        stopmachine_state = STOPMACHINE_WAIT;

        for_each_online_cpu(i) {
                if (i == raw_smp_processor_id())
                        continue;
                ret = kernel_thread(stopmachine, (void *)(long)i, CLONE_KERNEL);
                if (ret < 0)
                        break;
                stopmachine_num_threads++;
        }

        /* Wait for them all to come to life. */
        while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
                yield();

        /* If some failed, kill them all. */
        if (ret < 0) {
                stopmachine_set_state(STOPMACHINE_EXIT);
                return ret;
        }

        /* Now they are all started, make them hold the CPUs, ready. */
        preempt_disable();
        stopmachine_set_state(STOPMACHINE_PREPARE);

        /* Make them disable irqs. */
        local_irq_disable();
        stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);

        return 0;
}
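
/*
 * Summary of the handshake driven by stop_machine() above (derived
 * from the code itself): stopmachine_set_state() zeroes the ack
 * counter before publishing the new state, and each stopmachine()
 * thread increments stopmachine_thread_ack exactly once per state it
 * observes (alive, prepared, irqs disabled, exiting).  The paired
 * smp_wmb()/smp_mb() barriers ensure a thread's ack can never become
 * visible before its read of the state that triggered it.  So once
 * stop_machine() returns 0, every other online CPU is spinning in
 * stopmachine() with preemption and interrupts disabled, and the
 * calling CPU has interrupts disabled as well.
 */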

static void restart_machine(void)
{
        stopmachine_set_state(STOPMACHINE_EXIT);
        local_irq_enable();
        preempt_enable_no_resched();
}

struct stop_machine_data {
        int (*fn)(void *);
        void *data;
        struct completion done;
};

static int do_stop(void *_smdata)
{
        struct stop_machine_data *smdata = _smdata;
        int ret;

        ret = stop_machine();
        if (ret == 0) {
                ret = smdata->fn(smdata->data);
                restart_machine();
        }

        /* We're done: you can kthread_stop us now */
        complete(&smdata->done);

        /* Wait for kthread_stop */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return ret;
}

struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
                                       unsigned int cpu)
{
        struct stop_machine_data smdata;
        struct task_struct *p;

        smdata.fn = fn;
        smdata.data = data;
        init_completion(&smdata.done);

        down(&stopmachine_mutex);

        /* If they don't care which CPU fn runs on, bind to any online one. */
        if (cpu == NR_CPUS)
                cpu = raw_smp_processor_id();

        p = kthread_create(do_stop, &smdata, "kstopmachine");
        if (!IS_ERR(p)) {
                kthread_bind(p, cpu);
                wake_up_process(p);
                wait_for_completion(&smdata.done);
        }
        up(&stopmachine_mutex);
        return p;
}

int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
{
        struct task_struct *p;
        int ret;

        /* No CPUs can come up or down during this. */
        lock_cpu_hotplug();
        p = __stop_machine_run(fn, data, cpu);
        if (!IS_ERR(p))
                ret = kthread_stop(p);
        else
                ret = PTR_ERR(p);
        unlock_cpu_hotplug();

        return ret;
}
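
/*
 * Example usage (a hypothetical caller, not part of this file).  The
 * callback runs in the kstopmachine kthread with every other online
 * CPU spinning and all interrupts disabled, so it must not sleep:
 *
 *      static int set_flag(void *flagp)
 *      {
 *              *(int *)flagp = 1;      (no other CPU is executing here)
 *              return 0;
 *      }
 *
 *      err = stop_machine_run(set_flag, &flag, NR_CPUS);
 *
 * Passing NR_CPUS as the cpu argument means the caller does not care
 * which CPU runs the callback; the callback's return value is handed
 * back through kthread_stop() in stop_machine_run().
 */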