#include <linux/stop_machine.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/syscalls.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

/* Since we affect priority and affinity (both of which are visible
 * to, and settable by, outside processes) we do indirection via a
 * kthread. */

/* Thread to stop each CPU in user context. */
enum stopmachine_state {
	STOPMACHINE_WAIT,
	STOPMACHINE_PREPARE,
	STOPMACHINE_DISABLE_IRQ,
	STOPMACHINE_EXIT,
};

static enum stopmachine_state stopmachine_state;
static unsigned int stopmachine_num_threads;
static atomic_t stopmachine_thread_ack;
static DECLARE_MUTEX(stopmachine_mutex);

static int stopmachine(void *unused)
{
	int irqs_disabled = 0;
	int prepared = 0;

	/* Ack: we are alive */
	smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
	atomic_inc(&stopmachine_thread_ack);

	/* Simple state machine */
	while (stopmachine_state != STOPMACHINE_EXIT) {
		if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
		    && !irqs_disabled) {
			local_irq_disable();
			irqs_disabled = 1;
			/* Ack: irqs disabled. */
			smp_mb(); /* Must read state first. */
			atomic_inc(&stopmachine_thread_ack);
		} else if (stopmachine_state == STOPMACHINE_PREPARE
			   && !prepared) {
			/* Everyone is in place, hold CPU. */
			preempt_disable();
			prepared = 1;
			smp_mb(); /* Must read state first. */
			atomic_inc(&stopmachine_thread_ack);
		}
		/* Yield in first stage: migration threads need to
		 * help our sisters onto their CPUs. */
		if (!prepared && !irqs_disabled)
			yield();
		else
			cpu_relax();
	}

	/* Ack: we are exiting. */
	smp_mb(); /* Must read state first. */
	atomic_inc(&stopmachine_thread_ack);

	if (irqs_disabled)
		local_irq_enable();
	if (prepared)
		preempt_enable();

	return 0;
}

/* Change the thread state */
static void stopmachine_set_state(enum stopmachine_state state)
{
	atomic_set(&stopmachine_thread_ack, 0);
	smp_wmb(); /* Threads must see ack = 0 before the new state. */
	stopmachine_state = state;
	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
		cpu_relax();
}

static int stop_machine(void)
{
	int ret = 0;
	unsigned int i;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* One high-prio thread per cpu.  We'll do this one. */
	sched_setscheduler(current, SCHED_FIFO, &param);

	atomic_set(&stopmachine_thread_ack, 0);
	stopmachine_num_threads = 0;
	stopmachine_state = STOPMACHINE_WAIT;

	for_each_online_cpu(i) {
		struct task_struct *tsk;

		if (i == raw_smp_processor_id())
			continue;
		tsk = kthread_create(stopmachine, NULL, "stopmachine");
		if (IS_ERR(tsk)) {
			ret = PTR_ERR(tsk);
			break;
		}
		kthread_bind(tsk, i);
		wake_up_process(tsk);
		stopmachine_num_threads++;
	}

	/* Wait for them all to come to life. */
	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
		yield();

	/* If some failed, kill them all.  stopmachine_mutex is taken
	 * and released by __stop_machine_run(), not here. */
	if (ret < 0) {
		stopmachine_set_state(STOPMACHINE_EXIT);
		return ret;
	}

	/* Now they are all started, make them hold the CPUs, ready. */
	preempt_disable();
	stopmachine_set_state(STOPMACHINE_PREPARE);

	/* Make them disable irqs. */
	local_irq_disable();
	stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);

	return 0;
}
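/*
 * The handshake above moves every stopmachine thread through the
 * states in lockstep: each stopmachine_set_state() call zeroes the
 * ack counter, publishes the new state, then spins until all
 * stopmachine_num_threads threads have atomic_inc()'d
 * stopmachine_thread_ack.
 *
 *	STOPMACHINE_WAIT	threads created; each acks it is alive
 *	STOPMACHINE_PREPARE	each thread preempt_disable()s, then acks
 *	STOPMACHINE_DISABLE_IRQ	each thread local_irq_disable()s, then acks
 *	STOPMACHINE_EXIT	each thread acks, re-enables and exits
 *
 * Once stop_machine() returns 0, every other online CPU is spinning
 * in stopmachine() with interrupts off, so the caller's function can
 * run as if it were the only code on the machine.
 */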
static void restart_machine(void)
{
	stopmachine_set_state(STOPMACHINE_EXIT);
	local_irq_enable();
	preempt_enable_no_resched();
}

struct stop_machine_data {
	int (*fn)(void *);
	void *data;
	struct completion done;
};

static int do_stop(void *_smdata)
{
	struct stop_machine_data *smdata = _smdata;
	int ret;

	ret = stop_machine();
	if (ret == 0) {
		ret = smdata->fn(smdata->data);
		restart_machine();
	}

	/* We're done: you can kthread_stop us now */
	complete(&smdata->done);

	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return ret;
}

struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
				       unsigned int cpu)
{
	struct stop_machine_data smdata;
	struct task_struct *p;

	smdata.fn = fn;
	smdata.data = data;
	init_completion(&smdata.done);

	down(&stopmachine_mutex);

	/* If they don't care which CPU fn runs on, bind to any online one. */
	if (cpu == NR_CPUS)
		cpu = raw_smp_processor_id();

	p = kthread_create(do_stop, &smdata, "kstopmachine");
	if (!IS_ERR(p)) {
		kthread_bind(p, cpu);
		wake_up_process(p);
		wait_for_completion(&smdata.done);
	}
	up(&stopmachine_mutex);
	return p;
}

/**
 * stop_machine_run: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpu: the cpu to run @fn() on (or any, if @cpu == NR_CPUS)
 *
 * Schedules a thread on every other online cpu, each of which disables
 * interrupts, then disables interrupts on the current cpu.  The result
 * is that no one is holding a spinlock or inside any other
 * preempt-disabled region when @fn() runs.  This can be thought of as
 * a very heavy write lock, equivalent to grabbing every spinlock in
 * the kernel.
 */
int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
{
	struct task_struct *p;
	int ret;

	/* No CPUs can come up or down during this. */
	lock_cpu_hotplug();
	p = __stop_machine_run(fn, data, cpu);
	if (!IS_ERR(p))
		ret = kthread_stop(p);
	else
		ret = PTR_ERR(p);
	unlock_cpu_hotplug();

	return ret;
}
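/*
 * Usage sketch (illustrative only: "foo" and "set_foo" are
 * hypothetical names, not kernel symbols).  While @fn runs, every
 * other online CPU spins in stopmachine() with interrupts disabled,
 * so no CPU can observe a half-finished update:
 *
 *	static int foo;
 *
 *	static int set_foo(void *data)
 *	{
 *		foo = *(int *)data;
 *		return 0;
 *	}
 *
 *	int new_foo = 1;
 *	int err = stop_machine_run(set_foo, &new_foo, NR_CPUS);
 *
 * Passing NR_CPUS as @cpu means the caller does not care which CPU
 * set_foo() runs on.  On success err is set_foo()'s return value;
 * otherwise it is the negative errno from starting the stop threads.
 */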