#include <linux/stop_machine.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/syscalls.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

/* Since we affect priority and affinity (both of which are visible
 * to, and settable by, outside processes) we do indirection via a
 * kthread. */

/* Thread to stop each CPU in user context. */
enum stopmachine_state {
	STOPMACHINE_WAIT,
	STOPMACHINE_PREPARE,
	STOPMACHINE_DISABLE_IRQ,
	STOPMACHINE_EXIT,
};

static enum stopmachine_state stopmachine_state;
static unsigned int stopmachine_num_threads;
static atomic_t stopmachine_thread_ack;
static DECLARE_MUTEX(stopmachine_mutex);

static int stopmachine(void *cpu)
{
	int irqs_disabled = 0;
	int prepared = 0;

	set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));

	/* Ack: we are alive. */
	smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
	atomic_inc(&stopmachine_thread_ack);

	/* Simple state machine. */
	while (stopmachine_state != STOPMACHINE_EXIT) {
		if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
		    && !irqs_disabled) {
			local_irq_disable();
			irqs_disabled = 1;
			/* Ack: irqs disabled. */
			smp_mb(); /* Must read state first. */
			atomic_inc(&stopmachine_thread_ack);
		} else if (stopmachine_state == STOPMACHINE_PREPARE
			   && !prepared) {
			/* Everyone is in place, hold CPU. */
			preempt_disable();
			prepared = 1;
			smp_mb(); /* Must read state first. */
			atomic_inc(&stopmachine_thread_ack);
		}

		/* Yield in first stage: migration threads need to
		 * help our sisters onto their CPUs. */
		if (!prepared && !irqs_disabled)
			yield();
		else
			cpu_relax();
	}

	/* Ack: we are exiting. */
	smp_mb(); /* Must read state first. */
	atomic_inc(&stopmachine_thread_ack);

	if (irqs_disabled)
		local_irq_enable();
	if (prepared)
		preempt_enable();

	return 0;
}

/* Change the thread state and wait until all threads have acked it. */
static void stopmachine_set_state(enum stopmachine_state state)
{
	atomic_set(&stopmachine_thread_ack, 0);
	smp_wmb();
	stopmachine_state = state;
	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
		cpu_relax();
}

static int stop_machine(void)
{
	int i, ret = 0;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* One high-prio thread per cpu.  We'll do this one. */
	sched_setscheduler(current, SCHED_FIFO, &param);

	atomic_set(&stopmachine_thread_ack, 0);
	stopmachine_num_threads = 0;
	stopmachine_state = STOPMACHINE_WAIT;

	for_each_online_cpu(i) {
		if (i == raw_smp_processor_id())
			continue;
		ret = kernel_thread(stopmachine, (void *)(long)i, CLONE_KERNEL);
		if (ret < 0)
			break;
		stopmachine_num_threads++;
	}

	/* Wait for them all to come to life. */
	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
		yield();

	/* If some failed, kill them all.  Note: the caller, not us, holds
	 * stopmachine_mutex, so we must not release it here. */
	if (ret < 0) {
		stopmachine_set_state(STOPMACHINE_EXIT);
		return ret;
	}

	/* Now they are all started, make them hold the CPUs, ready. */
	preempt_disable();
	stopmachine_set_state(STOPMACHINE_PREPARE);

	/* Make them disable irqs. */
	local_irq_disable();
	stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);

	return 0;
}

static void restart_machine(void)
{
	stopmachine_set_state(STOPMACHINE_EXIT);
	local_irq_enable();
	preempt_enable_no_resched();
}
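/*
 * Illustrative timeline (added commentary, not from the original file):
 * the handshake between the driving thread and one stopped CPU, assuming
 * two online CPUs with the driver running on CPU0.  Every state change
 * blocks in stopmachine_set_state() until each thread has acked it.
 *
 *	CPU0 (stop_machine/restart_machine)	CPU1 (stopmachine thread)
 *	-----------------------------------	-------------------------
 *	kernel_thread(), wait for acks		smp_mb(); ack "alive"
 *	preempt_disable()
 *	set_state(STOPMACHINE_PREPARE)		preempt_disable(); ack
 *	local_irq_disable()
 *	set_state(STOPMACHINE_DISABLE_IRQ)	local_irq_disable(); ack
 *	... caller's fn() runs alone ...	cpu_relax() spin
 *	set_state(STOPMACHINE_EXIT)		ack; re-enable irqs, preempt
 *	local_irq_enable()
 *	preempt_enable_no_resched()
 */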
struct stop_machine_data {
	int (*fn)(void *);
	void *data;
	struct completion done;
};

static int do_stop(void *_smdata)
{
	struct stop_machine_data *smdata = _smdata;
	int ret;

	ret = stop_machine();
	if (ret == 0) {
		ret = smdata->fn(smdata->data);
		restart_machine();
	}

	/* We're done: you can kthread_stop us now. */
	complete(&smdata->done);

	/* Wait for kthread_stop. */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return ret;
}

struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
				       unsigned int cpu)
{
	struct stop_machine_data smdata;
	struct task_struct *p;

	smdata.fn = fn;
	smdata.data = data;
	init_completion(&smdata.done);

	down(&stopmachine_mutex);

	/* If they don't care which CPU fn runs on, bind to any online one. */
	if (cpu == NR_CPUS)
		cpu = raw_smp_processor_id();

	p = kthread_create(do_stop, &smdata, "kstopmachine");
	if (!IS_ERR(p)) {
		kthread_bind(p, cpu);
		wake_up_process(p);
		wait_for_completion(&smdata.done);
	}
	up(&stopmachine_mutex);
	return p;
}

int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
{
	struct task_struct *p;
	int ret;

	/* No CPUs can come up or down during this. */
	lock_cpu_hotplug();
	p = __stop_machine_run(fn, data, cpu);
	if (!IS_ERR(p))
		ret = kthread_stop(p);
	else
		ret = PTR_ERR(p);
	unlock_cpu_hotplug();

	return ret;
}
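/*
 * Usage sketch (illustrative addition, not part of the original file):
 * stop_machine_run() executes fn(data) on one CPU while every other
 * online CPU spins with interrupts disabled, so fn() must not sleep,
 * block on locks, or expect other CPUs to make progress.  The function
 * names and the STOP_MACHINE_EXAMPLE guard below are hypothetical.
 */
#ifdef STOP_MACHINE_EXAMPLE
static int example_flip_flag(void *data)
{
	/* Runs in isolation: no other CPU is executing concurrently. */
	*(int *)data = 1;
	return 0;
}

static int example_caller(void)
{
	int flag = 0;

	/* Passing NR_CPUS as the cpu argument means "any online CPU".
	 * Blocks until fn() has run and the machine is restarted. */
	return stop_machine_run(example_flip_flag, &flag, NR_CPUS);
}
#endif /* STOP_MACHINE_EXAMPLE */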