/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2 and any later version.
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/lglock.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	bool			executed;	/* actually executed? */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/*
 * Avoids a race between stop_two_cpus and global stop_cpus, where
 * the stoppers could get queued up in reverse order, leading to
 * system deadlock. Using an lglock means stop_two_cpus remains
 * relatively cheap.
 */
DEFINE_STATIC_LGLOCK(stop_cpus_lock);
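
/*
 * Editorial note, illustrative scenario (not from the original source):
 * without this lock, stop_cpus() queueing works on cpus 0 and 1 could
 * interleave with a concurrent stop_two_cpus(0, 1, ...) such that
 * cpu0's work list reads [A, B] while cpu1's reads [B, A].  Each
 * stopper thread would then run a multi_cpu_stop() whose partner work
 * sits behind the other caller's work, and both would spin in
 * MULTI_STOP_PREPARE forever.  Holding stop_cpus_lock across both
 * queueing loops keeps the per-cpu queue order consistent system-wide.
 */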

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
{
	if (done) {
		if (executed)
			done->executed = true;
		if (atomic_dec_and_test(&done->nr_todo))
			complete(&done->completion);
	}
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
				  struct cpu_stop_work *work)
{
	list_add_tail(&work->list, &stopper->works);
	wake_up_process(stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;

	spin_lock_irqsave(&stopper->lock, flags);
	if (stopper->enabled)
		__cpu_stop_queue_work(stopper, work);
	else
		cpu_stop_signal_done(work->done, false);
	spin_unlock_irqrestore(&stopper->lock, flags);
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	cpu_stop_queue_work(cpu, &work);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}
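
/*
 * Editorial note, usage sketch (illustrative only; the callback and
 * its argument below are hypothetical, not part of this file):
 *
 *	static int poke_cpu(void *arg)
 *	{
 *		struct poke_request *req = arg;
 *
 *		return do_nonsleeping_update(req);
 *	}
 *
 *	err = stop_one_cpu(cpu, poke_cpu, &req);
 *
 * The callback runs from the per-cpu stopper thread with preemption
 * disabled, so it must not sleep or take sleeping locks.
 */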

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus)
		is_active = cpu == cpumask_first(cpu_online_mask);
	else
		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		cpu_relax();
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}
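
/*
 * Editorial walk-through of the state machine above (illustrative):
 * every participating thread spins in multi_cpu_stop() re-reading
 * msdata->state.  When a thread observes a new state it performs that
 * state's action and calls ack_state(); the last thread to ack
 * advances the shared state.  The threads therefore move through
 * PREPARE -> DISABLE_IRQ -> RUN -> EXIT in lock-step: the shared state
 * only advances once every thread has acted on the current one, so
 * interrupts are hard-disabled everywhere before @fn runs on the
 * active cpus.
 */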

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
	cpu_stop_queue_work(cpu1, work1);
	cpu_stop_queue_work(cpu2, work2);
	lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);

	return 0;
}

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	preempt_disable();
	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	/*
	 * If we observe both CPUs active we know _cpu_down() cannot yet have
	 * queued its stop_machine works and therefore ours will get executed
	 * first.  Or it's not either one of our CPUs that's getting
	 * unplugged, in which case we don't care.
	 *
	 * This relies on the stopper workqueues to be FIFO.
	 */
	if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
		preempt_enable();
		return -ENOENT;
	}

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2)) {
		preempt_enable();
		return -ENOENT;
	}

	preempt_enable();

	wait_for_completion(&done.completion);

	return done.executed ? done.ret : -ENOENT;
}
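
/*
 * Editorial note (illustrative): since .active_cpus is cpumask_of(cpu1)
 * above, @fn runs on @cpu1's stopper while @cpu2's stopper only takes
 * part in the multi_cpu_stop() handshake.  At the time of this code the
 * scheduler's migrate_swap() is the typical caller, using this to
 * atomically exchange two running tasks between cpus.
 */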

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 */
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	cpu_stop_queue_work(cpu, work_buf);
}
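
/*
 * Editorial note (illustrative): the compound literal above leaves
 * .done NULL, so nothing is signalled when @fn finishes.  A typical
 * user is the scheduler's active load balancing, which fires a stopper
 * callback on a busy cpu to push the running task away and cannot
 * afford to block waiting for it.
 */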

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);

static void queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	lg_global_lock(&stop_cpus_lock);
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		cpu_stop_queue_work(cpu, work);
	}
	lg_global_unlock(&stop_cpus_lock);
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	queue_stop_cpus_work(cpumask, fn, arg, &done);
	wait_for_completion(&done.completion);
	return done.executed ? done.ret : -ENOENT;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
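
/*
 * Editorial note (illustrative): cpu_stop_done collects errors by
 * letting each failing callback overwrite done->ret, so when several
 * cpus fail the caller sees whichever non-zero value was stored last -
 * enough to know that something failed, but not which cpu failed.
 */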

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, any non
 * zero return value if any returned non zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;
	int ret;

repeat:
	work = NULL;
	spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		char ksym_buf[KSYM_NAME_LEN] __maybe_unused;

		/* cpu stop callbacks are not allowed to sleep */
		preempt_disable();

		ret = fn(arg);
		if (ret)
			done->ret = ret;

		/* restore preemption and check it's still balanced */
		preempt_enable();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %s(%p) leaked preempt count\n",
			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
					  ksym_buf), arg);

		cpu_stop_signal_done(done, true);
		goto repeat;
	}
}
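
/*
 * Editorial note: works queued via stop_one_cpu_nowait() carry a NULL
 * @done.  cpu_stop_signal_done() tolerates that, but the
 * "done->ret = ret" assignment above does not, so such callbacks are
 * expected to return 0 in this version of the code.
 */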

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

static void cpu_stop_unpark(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	spin_lock_irq(&stopper->lock);
	stopper->enabled = true;
	spin_unlock_irq(&stopper->lock);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.setup			= cpu_stop_unpark,
	.park			= cpu_stop_park,
	.pre_unpark		= cpu_stop_unpark,
	.selfparking		= true,
};
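
/*
 * Editorial note (illustrative): smpboot spawns one "migration/%u"
 * thread per cpu from this descriptor.  .pre_unpark re-enables the
 * stopper before the thread runs again after an unpark, and
 * .selfparking tells the hotplug core not to park the thread itself -
 * stop_machine_park() above does that explicitly, after disabling the
 * stopper.
 */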

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

#ifdef CONFIG_STOP_MACHINE

static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
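
/*
 * Editorial note, usage sketch (illustrative only; the callback and
 * helper below are hypothetical):
 *
 *	static int apply_patch(void *arg)
 *	{
 *		return patch_kernel_text(arg);
 *	}
 *
 *	err = stop_machine(apply_patch, &patch, NULL);
 *
 * With @cpus == NULL, @fn runs on the first online cpu while every
 * other online cpu spins with interrupts hard-disabled, which is what
 * makes stop_machine() suitable for cross-modifying code and similar
 * "the whole machine must hold still" updates.
 */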

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}

#endif	/* CONFIG_STOP_MACHINE */