/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/lglock.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	bool			executed;	/* actually executed? */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
static bool stop_machine_initialized = false;

/*
 * Avoids a race between stop_two_cpus and global stop_cpus, where
 * the stoppers could get queued up in reverse order, leading to
 * system deadlock.  Using an lglock means stop_two_cpus remains
 * relatively cheap.
 */
DEFINE_STATIC_LGLOCK(stop_cpus_lock);

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
{
	if (done) {
		if (executed)
			done->executed = true;
		if (atomic_dec_and_test(&done->nr_todo))
			complete(&done->completion);
	}
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
	unsigned long flags;

	spin_lock_irqsave(&stopper->lock, flags);

	if (stopper->enabled) {
		list_add_tail(&work->list, &stopper->works);
		wake_up_process(p);
	} else
		cpu_stop_signal_done(work->done, false);

	spin_unlock_irqrestore(&stopper->lock, flags);
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	cpu_stop_queue_work(cpu, &work);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}
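
/*
 * Usage sketch (illustrative, not part of the original file): running
 * a short, non-sleeping callback on one CPU via stop_one_cpu().  The
 * callback and some_percpu_counter below are hypothetical.
 *
 *	static int read_remote_counter(void *arg)
 *	{
 *		u64 *val = arg;
 *
 *		*val = __this_cpu_read(some_percpu_counter);
 *		return 0;
 *	}
 *
 *	u64 val;
 *	int err = stop_one_cpu(3, read_remote_counter, &val);
 *
 * The callback runs on cpu 3 with stopper priority and must not sleep;
 * err is -ENOENT if cpu 3 was offline, otherwise the callback's return
 * value.
 */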

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	int			(*fn)(void *);
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		cpu_relax();
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}
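
/*
 * Worked example of the ack protocol above (two threads): set_state()
 * arms thread_ack with num_threads == 2 before publishing PREPARE.
 * Each stopper spins in multi_cpu_stop(), notices the new state,
 * handles it and calls ack_state(); the second (last) decrement hits
 * zero and advances state to DISABLE_IRQ, re-arming thread_ack to 2.
 * The cycle repeats through RUN and EXIT, so the participants move
 * through the states in lockstep: no CPU can enter @fn before every
 * CPU has disabled interrupts, and none leaves until all are done.
 */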

struct irq_cpu_stop_queue_work_info {
	int cpu1;
	int cpu2;
	struct cpu_stop_work *work1;
	struct cpu_stop_work *work2;
};

/*
 * This function is always run with irqs and preemption disabled.
 * This guarantees that both work1 and work2 get queued, before
 * our local migrate thread gets the chance to preempt us.
 */
static void irq_cpu_stop_queue_work(void *arg)
{
	struct irq_cpu_stop_queue_work_info *info = arg;

	cpu_stop_queue_work(info->cpu1, info->work1);
	cpu_stop_queue_work(info->cpu2, info->work2);
}

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both @cpu1 and @cpu2 and runs @fn on @cpu1.
 *
 * Returns when both stops are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct irq_cpu_stop_queue_work_info call_args;
	struct multi_stop_data msdata;

	preempt_disable();
	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	call_args = (struct irq_cpu_stop_queue_work_info){
		.cpu1 = cpu1,
		.cpu2 = cpu2,
		.work1 = &work1,
		.work2 = &work2,
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	/*
	 * If we observe both CPUs active we know _cpu_down() cannot yet have
	 * queued its stop_machine works and therefore ours will get executed
	 * first.  Or it's not either one of our CPUs that's getting unplugged,
	 * in which case we don't care.
	 *
	 * This relies on the stopper workqueues to be FIFO.
	 */
	if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
		preempt_enable();
		return -ENOENT;
	}

	lg_local_lock(&stop_cpus_lock);
	/*
	 * Queuing needs to be done by the lowest numbered CPU, to ensure
	 * that works are always queued in the same order on every CPU.
	 * This prevents deadlocks.
	 */
	smp_call_function_single(min(cpu1, cpu2),
				 &irq_cpu_stop_queue_work,
				 &call_args, 1);
	lg_local_unlock(&stop_cpus_lock);
	preempt_enable();

	wait_for_completion(&done.completion);

	return done.executed ? done.ret : -ENOENT;
}
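
/*
 * Usage sketch (illustrative): operating on a pair of CPUs atomically,
 * in the style of the scheduler's task-swap migration.  The pair
 * struct, swap_pair() and the per-cpu variable my_state are all
 * hypothetical.  While @fn runs on @cpu1, both CPUs are spinning in
 * multi_cpu_stop() with interrupts disabled.
 *
 *	struct pair { int a, b; };
 *
 *	static int swap_pair(void *arg)
 *	{
 *		struct pair *p = arg;
 *
 *		swap(per_cpu(my_state, p->a), per_cpu(my_state, p->b));
 *		return 0;
 *	}
 *
 *	struct pair p = { .a = cpu1, .b = cpu2 };
 *	int err = stop_two_cpus(cpu1, cpu2, swap_pair, &p);
 */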

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 */
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	cpu_stop_queue_work(cpu, work_buf);
}
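
/*
 * Usage sketch (illustrative): the "nowait" variant needs a work
 * buffer that outlives the caller, typically per-cpu storage; the
 * scheduler uses this shape for active load balancing.  push_work and
 * push_task_off() are hypothetical.
 *
 *	static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
 *
 *	static int push_task_off(void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	stop_one_cpu_nowait(busiest_cpu, push_task_off, NULL,
 *			    &per_cpu(push_work, busiest_cpu));
 *
 * The call returns immediately; push_task_off() runs later on
 * busiest_cpu, and the buffer must not be reused until it does.
 */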

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);

static void queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;

	/* initialize works and done */
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(stop_cpus_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
	}

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	lg_global_lock(&stop_cpus_lock);
	for_each_cpu(cpu, cpumask)
		cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
	lg_global_unlock(&stop_cpus_lock);
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	queue_stop_cpus_work(cpumask, fn, arg, &done);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
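
/*
 * Usage sketch (illustrative): because all stop_cpus() calls are
 * serialized, @fn may rendezvous with its siblings, e.g. via a shared
 * atomic counter.  sync_cpus() and the node-mask usage are
 * hypothetical, and the rendezvous assumes every CPU in the mask is
 * online (offline CPUs never run @fn).
 *
 *	static int sync_cpus(void *arg)
 *	{
 *		atomic_t *left = arg;
 *
 *		atomic_dec(left);
 *		while (atomic_read(left) > 0)
 *			cpu_relax();
 *		return 0;
 *	}
 *
 *	atomic_t left = ATOMIC_INIT(cpumask_weight(cpumask_of_node(nid)));
 *	int err = stop_cpus(cpumask_of_node(nid), sync_cpus, &left);
 */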

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
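
/*
 * Usage sketch (illustrative): a caller that must not block behind
 * another stop_cpus() user can poll with try_stop_cpus().  my_fn() is
 * hypothetical; the bail-out and backoff are one possible shape.
 *
 *	while (try_stop_cpus(cpu_online_mask, my_fn, NULL) == -EAGAIN) {
 *		if (fatal_signal_pending(current))
 *			return -EINTR;
 *		cond_resched();
 *	}
 */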

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;
	int ret;

repeat:
	work = NULL;
	spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;

		/* cpu stop callbacks are not allowed to sleep */
		preempt_disable();

		ret = fn(arg);
		if (ret)
			done->ret = ret;

		/* restore preemption and check it's still balanced */
		preempt_enable();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %s(%p) leaked preempt count\n",
			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
					  ksym_buf), arg);

		cpu_stop_signal_done(done, true);
		goto repeat;
	}
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;
	unsigned long flags;

	/* drain remaining works */
	spin_lock_irqsave(&stopper->lock, flags);
	list_for_each_entry(work, &stopper->works, list)
		cpu_stop_signal_done(work->done, false);
	stopper->enabled = false;
	spin_unlock_irqrestore(&stopper->lock, flags);
}

static void cpu_stop_unpark(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	spin_lock_irq(&stopper->lock);
	stopper->enabled = true;
	spin_unlock_irq(&stopper->lock);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper_task,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.setup			= cpu_stop_unpark,
	.park			= cpu_stop_park,
	.pre_unpark		= cpu_stop_unpark,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

#ifdef CONFIG_STOP_MACHINE

int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before the stopper threads have been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}
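
/*
 * Usage sketch (illustrative): stop_machine() is the heavyweight
 * hammer for updates that no CPU may observe half-done, e.g. module
 * removal or text patching.  The text_patch struct and apply_patch()
 * are hypothetical (real text patching also needs a writable mapping
 * of the target); with @cpus == NULL, @fn runs on the first online
 * CPU while all others spin with interrupts disabled, so it must not
 * sleep.
 *
 *	struct text_patch { void *addr; const void *insn; size_t len; };
 *
 *	static int apply_patch(void *arg)
 *	{
 *		struct text_patch *p = arg;
 *
 *		memcpy(p->addr, p->insn, p->len);
 *		return 0;
 *	}
 *
 *	int err = stop_machine(apply_patch, &patch, NULL);
 */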

#endif	/* CONFIG_STOP_MACHINE */