11da177e4SLinus Torvalds /* CPU control.
21da177e4SLinus Torvalds * (C) 2001, 2002, 2003, 2004 Rusty Russell
31da177e4SLinus Torvalds *
41da177e4SLinus Torvalds * This code is licenced under the GPL.
51da177e4SLinus Torvalds */
6bf2c59fcSPeter Zijlstra #include <linux/sched/mm.h>
71da177e4SLinus Torvalds #include <linux/proc_fs.h>
81da177e4SLinus Torvalds #include <linux/smp.h>
91da177e4SLinus Torvalds #include <linux/init.h>
101da177e4SLinus Torvalds #include <linux/notifier.h>
113f07c014SIngo Molnar #include <linux/sched/signal.h>
12ef8bd77fSIngo Molnar #include <linux/sched/hotplug.h>
139ca12ac0SNicholas Piggin #include <linux/sched/isolation.h>
1429930025SIngo Molnar #include <linux/sched/task.h>
15a74cfffbSThomas Gleixner #include <linux/sched/smt.h>
161da177e4SLinus Torvalds #include <linux/unistd.h>
171da177e4SLinus Torvalds #include <linux/cpu.h>
18cb79295eSAnton Vorontsov #include <linux/oom.h>
19cb79295eSAnton Vorontsov #include <linux/rcupdate.h>
206f062123SThomas Gleixner #include <linux/delay.h>
219984de1aSPaul Gortmaker #include <linux/export.h>
22e4cc2f87SAnton Vorontsov #include <linux/bug.h>
231da177e4SLinus Torvalds #include <linux/kthread.h>
241da177e4SLinus Torvalds #include <linux/stop_machine.h>
2581615b62SIngo Molnar #include <linux/mutex.h>
265a0e3ad6STejun Heo #include <linux/gfp.h>
2779cfbdfaSSrivatsa S. Bhat #include <linux/suspend.h>
28a19423b9SGautham R. Shenoy #include <linux/lockdep.h>
29345527b1SPreeti U Murthy #include <linux/tick.h>
30a8994181SThomas Gleixner #include <linux/irq.h>
31941154bdSThomas Gleixner #include <linux/nmi.h>
324cb28cedSThomas Gleixner #include <linux/smpboot.h>
33e6d4989aSRichard Weinberger #include <linux/relay.h>
346731d4f1SSebastian Andrzej Siewior #include <linux/slab.h>
35dce1ca05SMark Rutland #include <linux/scs.h>
36fc8dffd3SThomas Gleixner #include <linux/percpu-rwsem.h>
37b22afcdfSThomas Gleixner #include <linux/cpuset.h>
383191dd5aSJason A. Donenfeld #include <linux/random.h>
39bae1a962SKuppuswamy Sathyanarayanan #include <linux/cc_platform.h>
40cff7d378SThomas Gleixner
41bb3632c6STodd E Brandt #include <trace/events/power.h>
42cff7d378SThomas Gleixner #define CREATE_TRACE_POINTS
43cff7d378SThomas Gleixner #include <trace/events/cpuhp.h>
441da177e4SLinus Torvalds
4538498a67SThomas Gleixner #include "smpboot.h"
4638498a67SThomas Gleixner
47cff7d378SThomas Gleixner /**
4811bc021dSRandy Dunlap * struct cpuhp_cpu_state - Per cpu hotplug state storage
49cff7d378SThomas Gleixner * @state: The current cpu state
50cff7d378SThomas Gleixner * @target: The target state
5111bc021dSRandy Dunlap * @fail: Current CPU hotplug callback state
524cb28cedSThomas Gleixner * @thread: Pointer to the hotplug thread
534cb28cedSThomas Gleixner * @should_run: Thread should execute
543b9d6da6SSebastian Andrzej Siewior * @rollback: Perform a rollback
55a724632cSThomas Gleixner * @single: Single callback invocation
56a724632cSThomas Gleixner * @bringup: Single callback bringup or teardown selector
5711bc021dSRandy Dunlap * @node: Remote CPU node; for multi-instance, do a
5811bc021dSRandy Dunlap * single entry callback for install/remove
5911bc021dSRandy Dunlap * @last: For multi-instance rollback, remember how far we got
60a724632cSThomas Gleixner * @cb_state: The state for a single callback (install/uninstall)
614cb28cedSThomas Gleixner * @result: Result of the operation
626f062123SThomas Gleixner * @ap_sync_state: State for AP synchronization
635ebe7742SPeter Zijlstra * @done_up: Signal completion to the issuer of the task for cpu-up
645ebe7742SPeter Zijlstra * @done_down: Signal completion to the issuer of the task for cpu-down
65cff7d378SThomas Gleixner */
66cff7d378SThomas Gleixner struct cpuhp_cpu_state {
67cff7d378SThomas Gleixner enum cpuhp_state state;
68cff7d378SThomas Gleixner enum cpuhp_state target;
691db49484SPeter Zijlstra enum cpuhp_state fail;
704cb28cedSThomas Gleixner #ifdef CONFIG_SMP
714cb28cedSThomas Gleixner struct task_struct *thread;
724cb28cedSThomas Gleixner bool should_run;
733b9d6da6SSebastian Andrzej Siewior bool rollback;
74a724632cSThomas Gleixner bool single;
75a724632cSThomas Gleixner bool bringup;
76cf392d10SThomas Gleixner struct hlist_node *node;
774dddfb5fSPeter Zijlstra struct hlist_node *last;
784cb28cedSThomas Gleixner enum cpuhp_state cb_state;
794cb28cedSThomas Gleixner int result;
806f062123SThomas Gleixner atomic_t ap_sync_state;
815ebe7742SPeter Zijlstra struct completion done_up;
825ebe7742SPeter Zijlstra struct completion done_down;
834cb28cedSThomas Gleixner #endif
84cff7d378SThomas Gleixner };
85cff7d378SThomas Gleixner
861db49484SPeter Zijlstra static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
871db49484SPeter Zijlstra .fail = CPUHP_INVALID,
881db49484SPeter Zijlstra };
89cff7d378SThomas Gleixner
90e797bda3SThomas Gleixner #ifdef CONFIG_SMP
91e797bda3SThomas Gleixner cpumask_t cpus_booted_once_mask;
92e797bda3SThomas Gleixner #endif
93e797bda3SThomas Gleixner
9449dfe2a6SThomas Gleixner #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
955f4b55e1SPeter Zijlstra static struct lockdep_map cpuhp_state_up_map =
965f4b55e1SPeter Zijlstra STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
975f4b55e1SPeter Zijlstra static struct lockdep_map cpuhp_state_down_map =
985f4b55e1SPeter Zijlstra STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
995f4b55e1SPeter Zijlstra
1005f4b55e1SPeter Zijlstra
cpuhp_lock_acquire(bool bringup)10176dc6c09SMathieu Malaterre static inline void cpuhp_lock_acquire(bool bringup)
1025f4b55e1SPeter Zijlstra {
1035f4b55e1SPeter Zijlstra lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
1045f4b55e1SPeter Zijlstra }
1055f4b55e1SPeter Zijlstra
cpuhp_lock_release(bool bringup)10676dc6c09SMathieu Malaterre static inline void cpuhp_lock_release(bool bringup)
1075f4b55e1SPeter Zijlstra {
1085f4b55e1SPeter Zijlstra lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
1095f4b55e1SPeter Zijlstra }
1105f4b55e1SPeter Zijlstra #else
1115f4b55e1SPeter Zijlstra
cpuhp_lock_acquire(bool bringup)11276dc6c09SMathieu Malaterre static inline void cpuhp_lock_acquire(bool bringup) { }
cpuhp_lock_release(bool bringup)11376dc6c09SMathieu Malaterre static inline void cpuhp_lock_release(bool bringup) { }
1145f4b55e1SPeter Zijlstra
11549dfe2a6SThomas Gleixner #endif
11649dfe2a6SThomas Gleixner
117cff7d378SThomas Gleixner /**
11811bc021dSRandy Dunlap * struct cpuhp_step - Hotplug state machine step
119cff7d378SThomas Gleixner * @name: Name of the step
120cff7d378SThomas Gleixner * @startup: Startup function of the step
121cff7d378SThomas Gleixner * @teardown: Teardown function of the step
122757c989bSThomas Gleixner * @cant_stop: Bringup/teardown can't be stopped at this step
12311bc021dSRandy Dunlap * @multi_instance: State has multiple instances which get added afterwards
124cff7d378SThomas Gleixner */
125cff7d378SThomas Gleixner struct cpuhp_step {
126cff7d378SThomas Gleixner const char *name;
127cf392d10SThomas Gleixner union {
1283c1627e9SThomas Gleixner int (*single)(unsigned int cpu);
1293c1627e9SThomas Gleixner int (*multi)(unsigned int cpu,
130cf392d10SThomas Gleixner struct hlist_node *node);
1313c1627e9SThomas Gleixner } startup;
132cf392d10SThomas Gleixner union {
1333c1627e9SThomas Gleixner int (*single)(unsigned int cpu);
1343c1627e9SThomas Gleixner int (*multi)(unsigned int cpu,
135cf392d10SThomas Gleixner struct hlist_node *node);
1363c1627e9SThomas Gleixner } teardown;
13711bc021dSRandy Dunlap /* private: */
138cf392d10SThomas Gleixner struct hlist_head list;
13911bc021dSRandy Dunlap /* public: */
140757c989bSThomas Gleixner bool cant_stop;
141cf392d10SThomas Gleixner bool multi_instance;
142cff7d378SThomas Gleixner };
143cff7d378SThomas Gleixner
14498f8cdceSThomas Gleixner static DEFINE_MUTEX(cpuhp_state_mutex);
14517a2f1ceSLai Jiangshan static struct cpuhp_step cpuhp_hp_states[];
146cff7d378SThomas Gleixner
cpuhp_get_step(enum cpuhp_state state)147a724632cSThomas Gleixner static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
148a724632cSThomas Gleixner {
14917a2f1ceSLai Jiangshan return cpuhp_hp_states + state;
150a724632cSThomas Gleixner }
151a724632cSThomas Gleixner
cpuhp_step_empty(bool bringup,struct cpuhp_step * step)152453e4108SVincent Donnefort static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
153453e4108SVincent Donnefort {
154453e4108SVincent Donnefort return bringup ? !step->startup.single : !step->teardown.single;
155453e4108SVincent Donnefort }
156453e4108SVincent Donnefort
157cff7d378SThomas Gleixner /**
15811bc021dSRandy Dunlap * cpuhp_invoke_callback - Invoke the callbacks for a given state
159cff7d378SThomas Gleixner * @cpu: The cpu for which the callback should be invoked
16096abb968SPeter Zijlstra * @state: The state to do callbacks for
161a724632cSThomas Gleixner * @bringup: True if the bringup callback should be invoked
16296abb968SPeter Zijlstra * @node: For multi-instance, do a single entry callback for install/remove
16396abb968SPeter Zijlstra * @lastp: For multi-instance rollback, remember how far we got
164cff7d378SThomas Gleixner *
165cf392d10SThomas Gleixner * Called from cpu hotplug and from the state register machinery.
16611bc021dSRandy Dunlap *
16711bc021dSRandy Dunlap * Return: %0 on success or a negative errno code
168cff7d378SThomas Gleixner */
cpuhp_invoke_callback(unsigned int cpu,enum cpuhp_state state,bool bringup,struct hlist_node * node,struct hlist_node ** lastp)169a724632cSThomas Gleixner static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
17096abb968SPeter Zijlstra bool bringup, struct hlist_node *node,
17196abb968SPeter Zijlstra struct hlist_node **lastp)
172cff7d378SThomas Gleixner {
173cff7d378SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
174a724632cSThomas Gleixner struct cpuhp_step *step = cpuhp_get_step(state);
175cf392d10SThomas Gleixner int (*cbm)(unsigned int cpu, struct hlist_node *node);
176cf392d10SThomas Gleixner int (*cb)(unsigned int cpu);
177cf392d10SThomas Gleixner int ret, cnt;
178cff7d378SThomas Gleixner
1791db49484SPeter Zijlstra if (st->fail == state) {
1801db49484SPeter Zijlstra st->fail = CPUHP_INVALID;
1811db49484SPeter Zijlstra return -EAGAIN;
1821db49484SPeter Zijlstra }
1831db49484SPeter Zijlstra
184453e4108SVincent Donnefort if (cpuhp_step_empty(bringup, step)) {
185453e4108SVincent Donnefort WARN_ON_ONCE(1);
186453e4108SVincent Donnefort return 0;
187453e4108SVincent Donnefort }
188453e4108SVincent Donnefort
189cf392d10SThomas Gleixner if (!step->multi_instance) {
19096abb968SPeter Zijlstra WARN_ON_ONCE(lastp && *lastp);
1913c1627e9SThomas Gleixner cb = bringup ? step->startup.single : step->teardown.single;
192453e4108SVincent Donnefort
193a724632cSThomas Gleixner trace_cpuhp_enter(cpu, st->target, state, cb);
194cff7d378SThomas Gleixner ret = cb(cpu);
195a724632cSThomas Gleixner trace_cpuhp_exit(cpu, st->state, state, ret);
196cf392d10SThomas Gleixner return ret;
197cf392d10SThomas Gleixner }
1983c1627e9SThomas Gleixner cbm = bringup ? step->startup.multi : step->teardown.multi;
199cf392d10SThomas Gleixner
200cf392d10SThomas Gleixner /* Single invocation for instance add/remove */
201cf392d10SThomas Gleixner if (node) {
20296abb968SPeter Zijlstra WARN_ON_ONCE(lastp && *lastp);
203cf392d10SThomas Gleixner trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
204cf392d10SThomas Gleixner ret = cbm(cpu, node);
205cf392d10SThomas Gleixner trace_cpuhp_exit(cpu, st->state, state, ret);
206cf392d10SThomas Gleixner return ret;
207cf392d10SThomas Gleixner }
208cf392d10SThomas Gleixner
209cf392d10SThomas Gleixner /* State transition. Invoke on all instances */
210cf392d10SThomas Gleixner cnt = 0;
211cf392d10SThomas Gleixner hlist_for_each(node, &step->list) {
21296abb968SPeter Zijlstra if (lastp && node == *lastp)
21396abb968SPeter Zijlstra break;
21496abb968SPeter Zijlstra
215cf392d10SThomas Gleixner trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
216cf392d10SThomas Gleixner ret = cbm(cpu, node);
217cf392d10SThomas Gleixner trace_cpuhp_exit(cpu, st->state, state, ret);
21896abb968SPeter Zijlstra if (ret) {
21996abb968SPeter Zijlstra if (!lastp)
220cf392d10SThomas Gleixner goto err;
22196abb968SPeter Zijlstra
22296abb968SPeter Zijlstra *lastp = node;
22396abb968SPeter Zijlstra return ret;
22496abb968SPeter Zijlstra }
225cf392d10SThomas Gleixner cnt++;
226cf392d10SThomas Gleixner }
22796abb968SPeter Zijlstra if (lastp)
22896abb968SPeter Zijlstra *lastp = NULL;
229cf392d10SThomas Gleixner return 0;
230cf392d10SThomas Gleixner err:
231cf392d10SThomas Gleixner /* Rollback the instances if one failed */
2323c1627e9SThomas Gleixner cbm = !bringup ? step->startup.multi : step->teardown.multi;
233cf392d10SThomas Gleixner if (!cbm)
234cf392d10SThomas Gleixner return ret;
235cf392d10SThomas Gleixner
236cf392d10SThomas Gleixner hlist_for_each(node, &step->list) {
237cf392d10SThomas Gleixner if (!cnt--)
238cf392d10SThomas Gleixner break;
239724a8688SPeter Zijlstra
240724a8688SPeter Zijlstra trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
241724a8688SPeter Zijlstra ret = cbm(cpu, node);
242724a8688SPeter Zijlstra trace_cpuhp_exit(cpu, st->state, state, ret);
243724a8688SPeter Zijlstra /*
244724a8688SPeter Zijlstra * Rollback must not fail,
245724a8688SPeter Zijlstra */
246724a8688SPeter Zijlstra WARN_ON_ONCE(ret);
247cff7d378SThomas Gleixner }
248cff7d378SThomas Gleixner return ret;
249cff7d378SThomas Gleixner }
250cff7d378SThomas Gleixner
25198a79d6aSRusty Russell #ifdef CONFIG_SMP
cpuhp_is_ap_state(enum cpuhp_state state)252fcb3029aSArnd Bergmann static bool cpuhp_is_ap_state(enum cpuhp_state state)
253fcb3029aSArnd Bergmann {
254fcb3029aSArnd Bergmann /*
255fcb3029aSArnd Bergmann * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
256fcb3029aSArnd Bergmann * purposes as that state is handled explicitly in cpu_down.
257fcb3029aSArnd Bergmann */
258fcb3029aSArnd Bergmann return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
259fcb3029aSArnd Bergmann }
260fcb3029aSArnd Bergmann
wait_for_ap_thread(struct cpuhp_cpu_state * st,bool bringup)2615ebe7742SPeter Zijlstra static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
2625ebe7742SPeter Zijlstra {
2635ebe7742SPeter Zijlstra struct completion *done = bringup ? &st->done_up : &st->done_down;
2645ebe7742SPeter Zijlstra wait_for_completion(done);
2655ebe7742SPeter Zijlstra }
2665ebe7742SPeter Zijlstra
complete_ap_thread(struct cpuhp_cpu_state * st,bool bringup)2675ebe7742SPeter Zijlstra static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
2685ebe7742SPeter Zijlstra {
2695ebe7742SPeter Zijlstra struct completion *done = bringup ? &st->done_up : &st->done_down;
2705ebe7742SPeter Zijlstra complete(done);
2715ebe7742SPeter Zijlstra }
2725ebe7742SPeter Zijlstra
2735ebe7742SPeter Zijlstra /*
2745ebe7742SPeter Zijlstra * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
2755ebe7742SPeter Zijlstra */
cpuhp_is_atomic_state(enum cpuhp_state state)2765ebe7742SPeter Zijlstra static bool cpuhp_is_atomic_state(enum cpuhp_state state)
2775ebe7742SPeter Zijlstra {
2785ebe7742SPeter Zijlstra return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
2795ebe7742SPeter Zijlstra }
2805ebe7742SPeter Zijlstra
2816f062123SThomas Gleixner /* Synchronization state management */
2826f062123SThomas Gleixner enum cpuhp_sync_state {
2836f062123SThomas Gleixner SYNC_STATE_DEAD,
2846f062123SThomas Gleixner SYNC_STATE_KICKED,
2856f062123SThomas Gleixner SYNC_STATE_SHOULD_DIE,
2866f062123SThomas Gleixner SYNC_STATE_ALIVE,
2876f062123SThomas Gleixner SYNC_STATE_SHOULD_ONLINE,
2886f062123SThomas Gleixner SYNC_STATE_ONLINE,
2896f062123SThomas Gleixner };
2906f062123SThomas Gleixner
2916f062123SThomas Gleixner #ifdef CONFIG_HOTPLUG_CORE_SYNC
2926f062123SThomas Gleixner /**
2936f062123SThomas Gleixner * cpuhp_ap_update_sync_state - Update synchronization state during bringup/teardown
2946f062123SThomas Gleixner * @state: The synchronization state to set
2956f062123SThomas Gleixner *
2966f062123SThomas Gleixner * No synchronization point. Just update of the synchronization state, but implies
2976f062123SThomas Gleixner * a full barrier so that the AP changes are visible before the control CPU proceeds.
2986f062123SThomas Gleixner */
cpuhp_ap_update_sync_state(enum cpuhp_sync_state state)2996f062123SThomas Gleixner static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state)
3006f062123SThomas Gleixner {
3016f062123SThomas Gleixner atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);
3026f062123SThomas Gleixner
3036f062123SThomas Gleixner (void)atomic_xchg(st, state);
3046f062123SThomas Gleixner }
3056f062123SThomas Gleixner
arch_cpuhp_sync_state_poll(void)3066f062123SThomas Gleixner void __weak arch_cpuhp_sync_state_poll(void) { cpu_relax(); }
3076f062123SThomas Gleixner
cpuhp_wait_for_sync_state(unsigned int cpu,enum cpuhp_sync_state state,enum cpuhp_sync_state next_state)3086f062123SThomas Gleixner static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state state,
3096f062123SThomas Gleixner enum cpuhp_sync_state next_state)
3106f062123SThomas Gleixner {
3116f062123SThomas Gleixner atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
3126f062123SThomas Gleixner ktime_t now, end, start = ktime_get();
3136f062123SThomas Gleixner int sync;
3146f062123SThomas Gleixner
3156f062123SThomas Gleixner end = start + 10ULL * NSEC_PER_SEC;
3166f062123SThomas Gleixner
3176f062123SThomas Gleixner sync = atomic_read(st);
3186f062123SThomas Gleixner while (1) {
3196f062123SThomas Gleixner if (sync == state) {
3206f062123SThomas Gleixner if (!atomic_try_cmpxchg(st, &sync, next_state))
3216f062123SThomas Gleixner continue;
3226f062123SThomas Gleixner return true;
3236f062123SThomas Gleixner }
3246f062123SThomas Gleixner
3256f062123SThomas Gleixner now = ktime_get();
3266f062123SThomas Gleixner if (now > end) {
3276f062123SThomas Gleixner /* Timeout. Leave the state unchanged */
3286f062123SThomas Gleixner return false;
3296f062123SThomas Gleixner } else if (now - start < NSEC_PER_MSEC) {
3306f062123SThomas Gleixner /* Poll for one millisecond */
3316f062123SThomas Gleixner arch_cpuhp_sync_state_poll();
3326f062123SThomas Gleixner } else {
3336f062123SThomas Gleixner usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
3346f062123SThomas Gleixner }
3356f062123SThomas Gleixner sync = atomic_read(st);
3366f062123SThomas Gleixner }
3376f062123SThomas Gleixner return true;
3386f062123SThomas Gleixner }
3396f062123SThomas Gleixner #else /* CONFIG_HOTPLUG_CORE_SYNC */
cpuhp_ap_update_sync_state(enum cpuhp_sync_state state)3406f062123SThomas Gleixner static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state) { }
3416f062123SThomas Gleixner #endif /* !CONFIG_HOTPLUG_CORE_SYNC */
3426f062123SThomas Gleixner
3436f062123SThomas Gleixner #ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
3446f062123SThomas Gleixner /**
3456f062123SThomas Gleixner * cpuhp_ap_report_dead - Update synchronization state to DEAD
3466f062123SThomas Gleixner *
3476f062123SThomas Gleixner * No synchronization point. Just update of the synchronization state.
3486f062123SThomas Gleixner */
cpuhp_ap_report_dead(void)3496f062123SThomas Gleixner void cpuhp_ap_report_dead(void)
3506f062123SThomas Gleixner {
3516f062123SThomas Gleixner cpuhp_ap_update_sync_state(SYNC_STATE_DEAD);
3526f062123SThomas Gleixner }
3536f062123SThomas Gleixner
arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)3546f062123SThomas Gleixner void __weak arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { }
3556f062123SThomas Gleixner
3566f062123SThomas Gleixner /*
3576f062123SThomas Gleixner * Late CPU shutdown synchronization point. Cannot use cpuhp_state::done_down
3586f062123SThomas Gleixner * because the AP cannot issue complete() at this stage.
3596f062123SThomas Gleixner */
cpuhp_bp_sync_dead(unsigned int cpu)3606f062123SThomas Gleixner static void cpuhp_bp_sync_dead(unsigned int cpu)
3616f062123SThomas Gleixner {
3626f062123SThomas Gleixner atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
3636f062123SThomas Gleixner int sync = atomic_read(st);
3646f062123SThomas Gleixner
3656f062123SThomas Gleixner do {
3666f062123SThomas Gleixner /* CPU can have reported dead already. Don't overwrite that! */
3676f062123SThomas Gleixner if (sync == SYNC_STATE_DEAD)
3686f062123SThomas Gleixner break;
3696f062123SThomas Gleixner } while (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_SHOULD_DIE));
3706f062123SThomas Gleixner
3716f062123SThomas Gleixner if (cpuhp_wait_for_sync_state(cpu, SYNC_STATE_DEAD, SYNC_STATE_DEAD)) {
3726f062123SThomas Gleixner /* CPU reached dead state. Invoke the cleanup function */
3736f062123SThomas Gleixner arch_cpuhp_cleanup_dead_cpu(cpu);
3746f062123SThomas Gleixner return;
3756f062123SThomas Gleixner }
3766f062123SThomas Gleixner
3776f062123SThomas Gleixner /* No further action possible. Emit message and give up. */
3786f062123SThomas Gleixner pr_err("CPU%u failed to report dead state\n", cpu);
3796f062123SThomas Gleixner }
3806f062123SThomas Gleixner #else /* CONFIG_HOTPLUG_CORE_SYNC_DEAD */
cpuhp_bp_sync_dead(unsigned int cpu)3816f062123SThomas Gleixner static inline void cpuhp_bp_sync_dead(unsigned int cpu) { }
3826f062123SThomas Gleixner #endif /* !CONFIG_HOTPLUG_CORE_SYNC_DEAD */
3836f062123SThomas Gleixner
3846f062123SThomas Gleixner #ifdef CONFIG_HOTPLUG_CORE_SYNC_FULL
3856f062123SThomas Gleixner /**
3866f062123SThomas Gleixner * cpuhp_ap_sync_alive - Synchronize AP with the control CPU once it is alive
3876f062123SThomas Gleixner *
3886f062123SThomas Gleixner * Updates the AP synchronization state to SYNC_STATE_ALIVE and waits
3896f062123SThomas Gleixner * for the BP to release it.
3906f062123SThomas Gleixner */
cpuhp_ap_sync_alive(void)3916f062123SThomas Gleixner void cpuhp_ap_sync_alive(void)
3926f062123SThomas Gleixner {
3936f062123SThomas Gleixner atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);
3946f062123SThomas Gleixner
3956f062123SThomas Gleixner cpuhp_ap_update_sync_state(SYNC_STATE_ALIVE);
3966f062123SThomas Gleixner
3976f062123SThomas Gleixner /* Wait for the control CPU to release it. */
3986f062123SThomas Gleixner while (atomic_read(st) != SYNC_STATE_SHOULD_ONLINE)
3996f062123SThomas Gleixner cpu_relax();
4006f062123SThomas Gleixner }
4016f062123SThomas Gleixner
cpuhp_can_boot_ap(unsigned int cpu)4026f062123SThomas Gleixner static bool cpuhp_can_boot_ap(unsigned int cpu)
4036f062123SThomas Gleixner {
4046f062123SThomas Gleixner atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
4056f062123SThomas Gleixner int sync = atomic_read(st);
4066f062123SThomas Gleixner
4076f062123SThomas Gleixner again:
4086f062123SThomas Gleixner switch (sync) {
4096f062123SThomas Gleixner case SYNC_STATE_DEAD:
4106f062123SThomas Gleixner /* CPU is properly dead */
4116f062123SThomas Gleixner break;
4126f062123SThomas Gleixner case SYNC_STATE_KICKED:
4136f062123SThomas Gleixner /* CPU did not come up in previous attempt */
4146f062123SThomas Gleixner break;
4156f062123SThomas Gleixner case SYNC_STATE_ALIVE:
4166f062123SThomas Gleixner /* CPU is stuck cpuhp_ap_sync_alive(). */
4176f062123SThomas Gleixner break;
4186f062123SThomas Gleixner default:
4196f062123SThomas Gleixner /* CPU failed to report online or dead and is in limbo state. */
4206f062123SThomas Gleixner return false;
4216f062123SThomas Gleixner }
4226f062123SThomas Gleixner
4236f062123SThomas Gleixner /* Prepare for booting */
4246f062123SThomas Gleixner if (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_KICKED))
4256f062123SThomas Gleixner goto again;
4266f062123SThomas Gleixner
4276f062123SThomas Gleixner return true;
4286f062123SThomas Gleixner }
4296f062123SThomas Gleixner
arch_cpuhp_cleanup_kick_cpu(unsigned int cpu)4306f062123SThomas Gleixner void __weak arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) { }
4316f062123SThomas Gleixner
4326f062123SThomas Gleixner /*
4336f062123SThomas Gleixner * Early CPU bringup synchronization point. Cannot use cpuhp_state::done_up
4346f062123SThomas Gleixner * because the AP cannot issue complete() so early in the bringup.
4356f062123SThomas Gleixner */
cpuhp_bp_sync_alive(unsigned int cpu)4366f062123SThomas Gleixner static int cpuhp_bp_sync_alive(unsigned int cpu)
4376f062123SThomas Gleixner {
4386f062123SThomas Gleixner int ret = 0;
4396f062123SThomas Gleixner
4406f062123SThomas Gleixner if (!IS_ENABLED(CONFIG_HOTPLUG_CORE_SYNC_FULL))
4416f062123SThomas Gleixner return 0;
4426f062123SThomas Gleixner
4436f062123SThomas Gleixner if (!cpuhp_wait_for_sync_state(cpu, SYNC_STATE_ALIVE, SYNC_STATE_SHOULD_ONLINE)) {
4446f062123SThomas Gleixner pr_err("CPU%u failed to report alive state\n", cpu);
4456f062123SThomas Gleixner ret = -EIO;
4466f062123SThomas Gleixner }
4476f062123SThomas Gleixner
4486f062123SThomas Gleixner /* Let the architecture cleanup the kick alive mechanics. */
4496f062123SThomas Gleixner arch_cpuhp_cleanup_kick_cpu(cpu);
4506f062123SThomas Gleixner return ret;
4516f062123SThomas Gleixner }
4526f062123SThomas Gleixner #else /* CONFIG_HOTPLUG_CORE_SYNC_FULL */
cpuhp_bp_sync_alive(unsigned int cpu)4536f062123SThomas Gleixner static inline int cpuhp_bp_sync_alive(unsigned int cpu) { return 0; }
cpuhp_can_boot_ap(unsigned int cpu)4546f062123SThomas Gleixner static inline bool cpuhp_can_boot_ap(unsigned int cpu) { return true; }
4556f062123SThomas Gleixner #endif /* !CONFIG_HOTPLUG_CORE_SYNC_FULL */
4566f062123SThomas Gleixner
457b3199c02SRusty Russell /* Serializes the updates to cpu_online_mask, cpu_present_mask */
458aa953877SLinus Torvalds static DEFINE_MUTEX(cpu_add_remove_lock);
459090e77c3SThomas Gleixner bool cpuhp_tasks_frozen;
460090e77c3SThomas Gleixner EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
4611da177e4SLinus Torvalds
46279a6cdebSLai Jiangshan /*
46393ae4f97SSrivatsa S. Bhat * The following two APIs (cpu_maps_update_begin/done) must be used when
46493ae4f97SSrivatsa S. Bhat * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
46579a6cdebSLai Jiangshan */
cpu_maps_update_begin(void)46679a6cdebSLai Jiangshan void cpu_maps_update_begin(void)
46779a6cdebSLai Jiangshan {
46879a6cdebSLai Jiangshan mutex_lock(&cpu_add_remove_lock);
46979a6cdebSLai Jiangshan }
47079a6cdebSLai Jiangshan
cpu_maps_update_done(void)47179a6cdebSLai Jiangshan void cpu_maps_update_done(void)
47279a6cdebSLai Jiangshan {
47379a6cdebSLai Jiangshan mutex_unlock(&cpu_add_remove_lock);
47479a6cdebSLai Jiangshan }
4751da177e4SLinus Torvalds
476fc8dffd3SThomas Gleixner /*
477fc8dffd3SThomas Gleixner * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
478e3920fb4SRafael J. Wysocki * Should always be manipulated under cpu_add_remove_lock
479e3920fb4SRafael J. Wysocki */
480e3920fb4SRafael J. Wysocki static int cpu_hotplug_disabled;
481e3920fb4SRafael J. Wysocki
48279a6cdebSLai Jiangshan #ifdef CONFIG_HOTPLUG_CPU
48379a6cdebSLai Jiangshan
484fc8dffd3SThomas Gleixner DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
485a19423b9SGautham R. Shenoy
4861037e4c5SKirill A. Shutemov static bool cpu_hotplug_offline_disabled __ro_after_init;
4871037e4c5SKirill A. Shutemov
cpus_read_lock(void)4888f553c49SThomas Gleixner void cpus_read_lock(void)
489a9d9baa1SAshok Raj {
490fc8dffd3SThomas Gleixner percpu_down_read(&cpu_hotplug_lock);
491a9d9baa1SAshok Raj }
4928f553c49SThomas Gleixner EXPORT_SYMBOL_GPL(cpus_read_lock);
493a9d9baa1SAshok Raj
cpus_read_trylock(void)4946f4ceee9SWaiman Long int cpus_read_trylock(void)
4956f4ceee9SWaiman Long {
4966f4ceee9SWaiman Long return percpu_down_read_trylock(&cpu_hotplug_lock);
4976f4ceee9SWaiman Long }
4986f4ceee9SWaiman Long EXPORT_SYMBOL_GPL(cpus_read_trylock);
4996f4ceee9SWaiman Long
cpus_read_unlock(void)5008f553c49SThomas Gleixner void cpus_read_unlock(void)
501a9d9baa1SAshok Raj {
502fc8dffd3SThomas Gleixner percpu_up_read(&cpu_hotplug_lock);
503a9d9baa1SAshok Raj }
5048f553c49SThomas Gleixner EXPORT_SYMBOL_GPL(cpus_read_unlock);
505a9d9baa1SAshok Raj
cpus_write_lock(void)5068f553c49SThomas Gleixner void cpus_write_lock(void)
507d221938cSGautham R Shenoy {
508fc8dffd3SThomas Gleixner percpu_down_write(&cpu_hotplug_lock);
509d221938cSGautham R Shenoy }
510d221938cSGautham R Shenoy
cpus_write_unlock(void)5118f553c49SThomas Gleixner void cpus_write_unlock(void)
512d221938cSGautham R Shenoy {
513fc8dffd3SThomas Gleixner percpu_up_write(&cpu_hotplug_lock);
514fc8dffd3SThomas Gleixner }
515fc8dffd3SThomas Gleixner
lockdep_assert_cpus_held(void)516fc8dffd3SThomas Gleixner void lockdep_assert_cpus_held(void)
517fc8dffd3SThomas Gleixner {
518ce48c457SValentin Schneider /*
519ce48c457SValentin Schneider * We can't have hotplug operations before userspace starts running,
520ce48c457SValentin Schneider * and some init codepaths will knowingly not take the hotplug lock.
521ce48c457SValentin Schneider * This is all valid, so mute lockdep until it makes sense to report
522ce48c457SValentin Schneider * unheld locks.
523ce48c457SValentin Schneider */
524ce48c457SValentin Schneider if (system_state < SYSTEM_RUNNING)
525ce48c457SValentin Schneider return;
526ce48c457SValentin Schneider
527fc8dffd3SThomas Gleixner percpu_rwsem_assert_held(&cpu_hotplug_lock);
528d221938cSGautham R Shenoy }
52979a6cdebSLai Jiangshan
53043759fe5SFrederic Weisbecker #ifdef CONFIG_LOCKDEP
lockdep_is_cpus_held(void)53143759fe5SFrederic Weisbecker int lockdep_is_cpus_held(void)
53243759fe5SFrederic Weisbecker {
53343759fe5SFrederic Weisbecker return percpu_rwsem_is_held(&cpu_hotplug_lock);
53443759fe5SFrederic Weisbecker }
53543759fe5SFrederic Weisbecker #endif
53643759fe5SFrederic Weisbecker
lockdep_acquire_cpus_lock(void)537cb92173dSPeter Zijlstra static void lockdep_acquire_cpus_lock(void)
538cb92173dSPeter Zijlstra {
5391751060eSPeter Zijlstra rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
540cb92173dSPeter Zijlstra }
541cb92173dSPeter Zijlstra
lockdep_release_cpus_lock(void)542cb92173dSPeter Zijlstra static void lockdep_release_cpus_lock(void)
543cb92173dSPeter Zijlstra {
5441751060eSPeter Zijlstra rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
545cb92173dSPeter Zijlstra }
546cb92173dSPeter Zijlstra
5471037e4c5SKirill A. Shutemov /* Declare CPU offlining not supported */
cpu_hotplug_disable_offlining(void)5481037e4c5SKirill A. Shutemov void cpu_hotplug_disable_offlining(void)
5491037e4c5SKirill A. Shutemov {
5501037e4c5SKirill A. Shutemov cpu_maps_update_begin();
5511037e4c5SKirill A. Shutemov cpu_hotplug_offline_disabled = true;
5521037e4c5SKirill A. Shutemov cpu_maps_update_done();
5531037e4c5SKirill A. Shutemov }
5541037e4c5SKirill A. Shutemov
55516e53dbfSSrivatsa S. Bhat /*
55616e53dbfSSrivatsa S. Bhat * Wait for currently running CPU hotplug operations to complete (if any) and
55716e53dbfSSrivatsa S. Bhat * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
55816e53dbfSSrivatsa S. Bhat * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
55916e53dbfSSrivatsa S. Bhat * hotplug path before performing hotplug operations. So acquiring that lock
56016e53dbfSSrivatsa S. Bhat * guarantees mutual exclusion from any currently running hotplug operations.
56116e53dbfSSrivatsa S. Bhat */
cpu_hotplug_disable(void)56216e53dbfSSrivatsa S. Bhat void cpu_hotplug_disable(void)
56316e53dbfSSrivatsa S. Bhat {
56416e53dbfSSrivatsa S. Bhat cpu_maps_update_begin();
56589af7ba5SVitaly Kuznetsov cpu_hotplug_disabled++;
56616e53dbfSSrivatsa S. Bhat cpu_maps_update_done();
56716e53dbfSSrivatsa S. Bhat }
56832145c46SVitaly Kuznetsov EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
56916e53dbfSSrivatsa S. Bhat
__cpu_hotplug_enable(void)57001b41159SLianwei Wang static void __cpu_hotplug_enable(void)
57101b41159SLianwei Wang {
57201b41159SLianwei Wang if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
57301b41159SLianwei Wang return;
57401b41159SLianwei Wang cpu_hotplug_disabled--;
57501b41159SLianwei Wang }
57601b41159SLianwei Wang
cpu_hotplug_enable(void)57716e53dbfSSrivatsa S. Bhat void cpu_hotplug_enable(void)
57816e53dbfSSrivatsa S. Bhat {
57916e53dbfSSrivatsa S. Bhat cpu_maps_update_begin();
58001b41159SLianwei Wang __cpu_hotplug_enable();
58116e53dbfSSrivatsa S. Bhat cpu_maps_update_done();
58216e53dbfSSrivatsa S. Bhat }
58332145c46SVitaly Kuznetsov EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
584cb92173dSPeter Zijlstra
585cb92173dSPeter Zijlstra #else
586cb92173dSPeter Zijlstra
lockdep_acquire_cpus_lock(void)587cb92173dSPeter Zijlstra static void lockdep_acquire_cpus_lock(void)
588cb92173dSPeter Zijlstra {
589cb92173dSPeter Zijlstra }
590cb92173dSPeter Zijlstra
lockdep_release_cpus_lock(void)591cb92173dSPeter Zijlstra static void lockdep_release_cpus_lock(void)
592cb92173dSPeter Zijlstra {
593cb92173dSPeter Zijlstra }
594cb92173dSPeter Zijlstra
595b9d10be7SToshi Kani #endif /* CONFIG_HOTPLUG_CPU */
59679a6cdebSLai Jiangshan
597a74cfffbSThomas Gleixner /*
598a74cfffbSThomas Gleixner * Architectures that need SMT-specific errata handling during SMT hotplug
599a74cfffbSThomas Gleixner * should override this.
600a74cfffbSThomas Gleixner */
arch_smt_update(void)601a74cfffbSThomas Gleixner void __weak arch_smt_update(void) { }
602a74cfffbSThomas Gleixner
6030cc3cd21SThomas Gleixner #ifdef CONFIG_HOTPLUG_SMT
6043f916919SMichael Ellerman
6050cc3cd21SThomas Gleixner enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
606447ae4acSMichael Ellerman static unsigned int cpu_smt_max_threads __ro_after_init;
607447ae4acSMichael Ellerman unsigned int cpu_smt_num_threads __read_mostly = UINT_MAX;
608bc2d8d26SThomas Gleixner
cpu_smt_disable(bool force)6098e1b706bSJiri Kosina void __init cpu_smt_disable(bool force)
6100cc3cd21SThomas Gleixner {
611e1572f1dSVitaly Kuznetsov if (!cpu_smt_possible())
6128e1b706bSJiri Kosina return;
6138e1b706bSJiri Kosina
6148e1b706bSJiri Kosina if (force) {
6150cc3cd21SThomas Gleixner pr_info("SMT: Force disabled\n");
6160cc3cd21SThomas Gleixner cpu_smt_control = CPU_SMT_FORCE_DISABLED;
6178e1b706bSJiri Kosina } else {
618d0e7d144SBorislav Petkov pr_info("SMT: disabled\n");
6198e1b706bSJiri Kosina cpu_smt_control = CPU_SMT_DISABLED;
6200cc3cd21SThomas Gleixner }
621447ae4acSMichael Ellerman cpu_smt_num_threads = 1;
6228e1b706bSJiri Kosina }
6238e1b706bSJiri Kosina
624fee0aedeSThomas Gleixner /*
625fee0aedeSThomas Gleixner * The decision whether SMT is supported can only be done after the full
626b284909aSJosh Poimboeuf * CPU identification. Called from architecture code.
627fee0aedeSThomas Gleixner */
cpu_smt_set_num_threads(unsigned int num_threads,unsigned int max_threads)628447ae4acSMichael Ellerman void __init cpu_smt_set_num_threads(unsigned int num_threads,
629447ae4acSMichael Ellerman unsigned int max_threads)
630fee0aedeSThomas Gleixner {
631447ae4acSMichael Ellerman WARN_ON(!num_threads || (num_threads > max_threads));
632447ae4acSMichael Ellerman
63391b4a7dbSLaurent Dufour if (max_threads == 1)
634fee0aedeSThomas Gleixner cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
635447ae4acSMichael Ellerman
636447ae4acSMichael Ellerman cpu_smt_max_threads = max_threads;
637447ae4acSMichael Ellerman
638447ae4acSMichael Ellerman /*
639447ae4acSMichael Ellerman * If SMT has been disabled via the kernel command line or SMT is
640447ae4acSMichael Ellerman * not supported, set cpu_smt_num_threads to 1 for consistency.
641447ae4acSMichael Ellerman * If enabled, take the architecture requested number of threads
642447ae4acSMichael Ellerman * to bring up into account.
643447ae4acSMichael Ellerman */
644447ae4acSMichael Ellerman if (cpu_smt_control != CPU_SMT_ENABLED)
645447ae4acSMichael Ellerman cpu_smt_num_threads = 1;
646447ae4acSMichael Ellerman else if (num_threads < cpu_smt_num_threads)
647447ae4acSMichael Ellerman cpu_smt_num_threads = num_threads;
648fee0aedeSThomas Gleixner }
649fee0aedeSThomas Gleixner
smt_cmdline_disable(char * str)6508e1b706bSJiri Kosina static int __init smt_cmdline_disable(char *str)
6518e1b706bSJiri Kosina {
6528e1b706bSJiri Kosina cpu_smt_disable(str && !strcmp(str, "force"));
6530cc3cd21SThomas Gleixner return 0;
6540cc3cd21SThomas Gleixner }
6550cc3cd21SThomas Gleixner early_param("nosmt", smt_cmdline_disable);
6560cc3cd21SThomas Gleixner
65738253464SMichael Ellerman /*
65838253464SMichael Ellerman * For Archicture supporting partial SMT states check if the thread is allowed.
65938253464SMichael Ellerman * Otherwise this has already been checked through cpu_smt_max_threads when
66038253464SMichael Ellerman * setting the SMT level.
66138253464SMichael Ellerman */
cpu_smt_thread_allowed(unsigned int cpu)66238253464SMichael Ellerman static inline bool cpu_smt_thread_allowed(unsigned int cpu)
66338253464SMichael Ellerman {
66438253464SMichael Ellerman #ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC
66538253464SMichael Ellerman return topology_smt_thread_allowed(cpu);
66638253464SMichael Ellerman #else
66738253464SMichael Ellerman return true;
66838253464SMichael Ellerman #endif
66938253464SMichael Ellerman }
67038253464SMichael Ellerman
cpu_bootable(unsigned int cpu)671d91bdd96SThomas Gleixner static inline bool cpu_bootable(unsigned int cpu)
6720cc3cd21SThomas Gleixner {
67338253464SMichael Ellerman if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
6740cc3cd21SThomas Gleixner return true;
6750cc3cd21SThomas Gleixner
676d91bdd96SThomas Gleixner /* All CPUs are bootable if controls are not configured */
677d91bdd96SThomas Gleixner if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED)
678d91bdd96SThomas Gleixner return true;
679d91bdd96SThomas Gleixner
680d91bdd96SThomas Gleixner /* All CPUs are bootable if CPU is not SMT capable */
681d91bdd96SThomas Gleixner if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
682d91bdd96SThomas Gleixner return true;
683d91bdd96SThomas Gleixner
684b284909aSJosh Poimboeuf if (topology_is_primary_thread(cpu))
6850cc3cd21SThomas Gleixner return true;
6860cc3cd21SThomas Gleixner
6870cc3cd21SThomas Gleixner /*
6880cc3cd21SThomas Gleixner * On x86 it's required to boot all logical CPUs at least once so
6890cc3cd21SThomas Gleixner * that the init code can get a chance to set CR4.MCE on each
690182e073fSEthon Paul * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
6910cc3cd21SThomas Gleixner * core will shutdown the machine.
6920cc3cd21SThomas Gleixner */
693e797bda3SThomas Gleixner return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
6940cc3cd21SThomas Gleixner }
695e1572f1dSVitaly Kuznetsov
69652b38b7aSZhang Rui /* Returns true if SMT is supported and not forcefully (irreversibly) disabled */
cpu_smt_possible(void)697e1572f1dSVitaly Kuznetsov bool cpu_smt_possible(void)
698e1572f1dSVitaly Kuznetsov {
699e1572f1dSVitaly Kuznetsov return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
700e1572f1dSVitaly Kuznetsov cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
701e1572f1dSVitaly Kuznetsov }
702e1572f1dSVitaly Kuznetsov EXPORT_SYMBOL_GPL(cpu_smt_possible);
70318415f33SThomas Gleixner
7040cc3cd21SThomas Gleixner #else
cpu_bootable(unsigned int cpu)705d91bdd96SThomas Gleixner static inline bool cpu_bootable(unsigned int cpu) { return true; }
7060cc3cd21SThomas Gleixner #endif
7070cc3cd21SThomas Gleixner
7084dddfb5fSPeter Zijlstra static inline enum cpuhp_state
cpuhp_set_state(int cpu,struct cpuhp_cpu_state * st,enum cpuhp_state target)709b7ba6d8dSSteven Price cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
7104dddfb5fSPeter Zijlstra {
7114dddfb5fSPeter Zijlstra enum cpuhp_state prev_state = st->state;
7122ea46c6fSPeter Zijlstra bool bringup = st->state < target;
7134dddfb5fSPeter Zijlstra
7144dddfb5fSPeter Zijlstra st->rollback = false;
7154dddfb5fSPeter Zijlstra st->last = NULL;
7164dddfb5fSPeter Zijlstra
7174dddfb5fSPeter Zijlstra st->target = target;
7184dddfb5fSPeter Zijlstra st->single = false;
7192ea46c6fSPeter Zijlstra st->bringup = bringup;
720b7ba6d8dSSteven Price if (cpu_dying(cpu) != !bringup)
721b7ba6d8dSSteven Price set_cpu_dying(cpu, !bringup);
7224dddfb5fSPeter Zijlstra
7234dddfb5fSPeter Zijlstra return prev_state;
7244dddfb5fSPeter Zijlstra }
7254dddfb5fSPeter Zijlstra
7264dddfb5fSPeter Zijlstra static inline void
cpuhp_reset_state(int cpu,struct cpuhp_cpu_state * st,enum cpuhp_state prev_state)727b7ba6d8dSSteven Price cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
728b7ba6d8dSSteven Price enum cpuhp_state prev_state)
7294dddfb5fSPeter Zijlstra {
7302ea46c6fSPeter Zijlstra bool bringup = !st->bringup;
7312ea46c6fSPeter Zijlstra
732453e4108SVincent Donnefort st->target = prev_state;
733453e4108SVincent Donnefort
734453e4108SVincent Donnefort /*
735453e4108SVincent Donnefort * Already rolling back. No need invert the bringup value or to change
736453e4108SVincent Donnefort * the current state.
737453e4108SVincent Donnefort */
738453e4108SVincent Donnefort if (st->rollback)
739453e4108SVincent Donnefort return;
740453e4108SVincent Donnefort
7414dddfb5fSPeter Zijlstra st->rollback = true;
7424dddfb5fSPeter Zijlstra
7434dddfb5fSPeter Zijlstra /*
7444dddfb5fSPeter Zijlstra * If we have st->last we need to undo partial multi_instance of this
7454dddfb5fSPeter Zijlstra * state first. Otherwise start undo at the previous state.
7464dddfb5fSPeter Zijlstra */
7474dddfb5fSPeter Zijlstra if (!st->last) {
7484dddfb5fSPeter Zijlstra if (st->bringup)
7494dddfb5fSPeter Zijlstra st->state--;
7504dddfb5fSPeter Zijlstra else
7514dddfb5fSPeter Zijlstra st->state++;
7524dddfb5fSPeter Zijlstra }
7534dddfb5fSPeter Zijlstra
7542ea46c6fSPeter Zijlstra st->bringup = bringup;
755b7ba6d8dSSteven Price if (cpu_dying(cpu) != !bringup)
756b7ba6d8dSSteven Price set_cpu_dying(cpu, !bringup);
7574dddfb5fSPeter Zijlstra }
7584dddfb5fSPeter Zijlstra
7594dddfb5fSPeter Zijlstra /* Regular hotplug invocation of the AP hotplug thread */
__cpuhp_kick_ap(struct cpuhp_cpu_state * st)7604dddfb5fSPeter Zijlstra static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
7614dddfb5fSPeter Zijlstra {
7624dddfb5fSPeter Zijlstra if (!st->single && st->state == st->target)
7634dddfb5fSPeter Zijlstra return;
7644dddfb5fSPeter Zijlstra
7654dddfb5fSPeter Zijlstra st->result = 0;
7664dddfb5fSPeter Zijlstra /*
7674dddfb5fSPeter Zijlstra * Make sure the above stores are visible before should_run becomes
7684dddfb5fSPeter Zijlstra * true. Paired with the mb() above in cpuhp_thread_fun()
7694dddfb5fSPeter Zijlstra */
7704dddfb5fSPeter Zijlstra smp_mb();
7714dddfb5fSPeter Zijlstra st->should_run = true;
7724dddfb5fSPeter Zijlstra wake_up_process(st->thread);
7735ebe7742SPeter Zijlstra wait_for_ap_thread(st, st->bringup);
7744dddfb5fSPeter Zijlstra }
7754dddfb5fSPeter Zijlstra
cpuhp_kick_ap(int cpu,struct cpuhp_cpu_state * st,enum cpuhp_state target)776b7ba6d8dSSteven Price static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
777b7ba6d8dSSteven Price enum cpuhp_state target)
7784dddfb5fSPeter Zijlstra {
7794dddfb5fSPeter Zijlstra enum cpuhp_state prev_state;
7804dddfb5fSPeter Zijlstra int ret;
7814dddfb5fSPeter Zijlstra
782b7ba6d8dSSteven Price prev_state = cpuhp_set_state(cpu, st, target);
7834dddfb5fSPeter Zijlstra __cpuhp_kick_ap(st);
7844dddfb5fSPeter Zijlstra if ((ret = st->result)) {
785b7ba6d8dSSteven Price cpuhp_reset_state(cpu, st, prev_state);
7864dddfb5fSPeter Zijlstra __cpuhp_kick_ap(st);
7874dddfb5fSPeter Zijlstra }
7884dddfb5fSPeter Zijlstra
7894dddfb5fSPeter Zijlstra return ret;
7904dddfb5fSPeter Zijlstra }
7919cd4f1a4SThomas Gleixner
bringup_wait_for_ap_online(unsigned int cpu)79222b612e2SThomas Gleixner static int bringup_wait_for_ap_online(unsigned int cpu)
7938df3e07eSThomas Gleixner {
7948df3e07eSThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
7958df3e07eSThomas Gleixner
7969cd4f1a4SThomas Gleixner /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
7975ebe7742SPeter Zijlstra wait_for_ap_thread(st, true);
798dea1d0f5SThomas Gleixner if (WARN_ON_ONCE((!cpu_online(cpu))))
799dea1d0f5SThomas Gleixner return -ECANCELED;
8009cd4f1a4SThomas Gleixner
80145178ac0SPeter Zijlstra /* Unpark the hotplug thread of the target cpu */
8029cd4f1a4SThomas Gleixner kthread_unpark(st->thread);
8039cd4f1a4SThomas Gleixner
8040cc3cd21SThomas Gleixner /*
8050cc3cd21SThomas Gleixner * SMT soft disabling on X86 requires to bring the CPU out of the
8060cc3cd21SThomas Gleixner * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
807f5602011SJiri Kosina * CPU marked itself as booted_once in notify_cpu_starting() so the
808d91bdd96SThomas Gleixner * cpu_bootable() check will now return false if this is not the
8090cc3cd21SThomas Gleixner * primary sibling.
8100cc3cd21SThomas Gleixner */
811d91bdd96SThomas Gleixner if (!cpu_bootable(cpu))
8120cc3cd21SThomas Gleixner return -ECANCELED;
8134dddfb5fSPeter Zijlstra return 0;
8148df3e07eSThomas Gleixner }
8158df3e07eSThomas Gleixner
816a631be92SThomas Gleixner #ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
cpuhp_kick_ap_alive(unsigned int cpu)817a631be92SThomas Gleixner static int cpuhp_kick_ap_alive(unsigned int cpu)
818a631be92SThomas Gleixner {
819a631be92SThomas Gleixner if (!cpuhp_can_boot_ap(cpu))
820a631be92SThomas Gleixner return -EAGAIN;
821a631be92SThomas Gleixner
822a631be92SThomas Gleixner return arch_cpuhp_kick_ap_alive(cpu, idle_thread_get(cpu));
823a631be92SThomas Gleixner }
824a631be92SThomas Gleixner
cpuhp_bringup_ap(unsigned int cpu)825a631be92SThomas Gleixner static int cpuhp_bringup_ap(unsigned int cpu)
826a631be92SThomas Gleixner {
827a631be92SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
828a631be92SThomas Gleixner int ret;
829a631be92SThomas Gleixner
830a631be92SThomas Gleixner /*
831a631be92SThomas Gleixner * Some architectures have to walk the irq descriptors to
832a631be92SThomas Gleixner * setup the vector space for the cpu which comes online.
833a631be92SThomas Gleixner * Prevent irq alloc/free across the bringup.
834a631be92SThomas Gleixner */
835a631be92SThomas Gleixner irq_lock_sparse();
836a631be92SThomas Gleixner
837a631be92SThomas Gleixner ret = cpuhp_bp_sync_alive(cpu);
838a631be92SThomas Gleixner if (ret)
839a631be92SThomas Gleixner goto out_unlock;
840a631be92SThomas Gleixner
841a631be92SThomas Gleixner ret = bringup_wait_for_ap_online(cpu);
842a631be92SThomas Gleixner if (ret)
843a631be92SThomas Gleixner goto out_unlock;
844a631be92SThomas Gleixner
845a631be92SThomas Gleixner irq_unlock_sparse();
846a631be92SThomas Gleixner
847a631be92SThomas Gleixner if (st->target <= CPUHP_AP_ONLINE_IDLE)
848a631be92SThomas Gleixner return 0;
849a631be92SThomas Gleixner
850a631be92SThomas Gleixner return cpuhp_kick_ap(cpu, st, st->target);
851a631be92SThomas Gleixner
852a631be92SThomas Gleixner out_unlock:
853a631be92SThomas Gleixner irq_unlock_sparse();
854a631be92SThomas Gleixner return ret;
855a631be92SThomas Gleixner }
856a631be92SThomas Gleixner #else
bringup_cpu(unsigned int cpu)857ba997462SThomas Gleixner static int bringup_cpu(unsigned int cpu)
858ba997462SThomas Gleixner {
85922b612e2SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
860ba997462SThomas Gleixner struct task_struct *idle = idle_thread_get(cpu);
861ba997462SThomas Gleixner int ret;
862ba997462SThomas Gleixner
8636f062123SThomas Gleixner if (!cpuhp_can_boot_ap(cpu))
8646f062123SThomas Gleixner return -EAGAIN;
8656f062123SThomas Gleixner
866aa877175SBoris Ostrovsky /*
867aa877175SBoris Ostrovsky * Some architectures have to walk the irq descriptors to
868aa877175SBoris Ostrovsky * setup the vector space for the cpu which comes online.
86922b612e2SThomas Gleixner *
87022b612e2SThomas Gleixner * Prevent irq alloc/free across the bringup by acquiring the
87122b612e2SThomas Gleixner * sparse irq lock. Hold it until the upcoming CPU completes the
87222b612e2SThomas Gleixner * startup in cpuhp_online_idle() which allows to avoid
87322b612e2SThomas Gleixner * intermediate synchronization points in the architecture code.
874aa877175SBoris Ostrovsky */
875aa877175SBoris Ostrovsky irq_lock_sparse();
876aa877175SBoris Ostrovsky
877ba997462SThomas Gleixner ret = __cpu_up(cpu, idle);
878530e9b76SThomas Gleixner if (ret)
87922b612e2SThomas Gleixner goto out_unlock;
88022b612e2SThomas Gleixner
8816f062123SThomas Gleixner ret = cpuhp_bp_sync_alive(cpu);
8826f062123SThomas Gleixner if (ret)
8836f062123SThomas Gleixner goto out_unlock;
8846f062123SThomas Gleixner
88522b612e2SThomas Gleixner ret = bringup_wait_for_ap_online(cpu);
88622b612e2SThomas Gleixner if (ret)
88722b612e2SThomas Gleixner goto out_unlock;
88822b612e2SThomas Gleixner
88922b612e2SThomas Gleixner irq_unlock_sparse();
89022b612e2SThomas Gleixner
89122b612e2SThomas Gleixner if (st->target <= CPUHP_AP_ONLINE_IDLE)
89222b612e2SThomas Gleixner return 0;
89322b612e2SThomas Gleixner
89422b612e2SThomas Gleixner return cpuhp_kick_ap(cpu, st, st->target);
89522b612e2SThomas Gleixner
89622b612e2SThomas Gleixner out_unlock:
89722b612e2SThomas Gleixner irq_unlock_sparse();
898ba997462SThomas Gleixner return ret;
899ba997462SThomas Gleixner }
900a631be92SThomas Gleixner #endif
901ba997462SThomas Gleixner
finish_cpu(unsigned int cpu)902bf2c59fcSPeter Zijlstra static int finish_cpu(unsigned int cpu)
903bf2c59fcSPeter Zijlstra {
904bf2c59fcSPeter Zijlstra struct task_struct *idle = idle_thread_get(cpu);
905bf2c59fcSPeter Zijlstra struct mm_struct *mm = idle->active_mm;
906bf2c59fcSPeter Zijlstra
907bf2c59fcSPeter Zijlstra /*
908bf2c59fcSPeter Zijlstra * idle_task_exit() will have switched to &init_mm, now
909bf2c59fcSPeter Zijlstra * clean up any remaining active_mm state.
910bf2c59fcSPeter Zijlstra */
911bf2c59fcSPeter Zijlstra if (mm != &init_mm)
912bf2c59fcSPeter Zijlstra idle->active_mm = &init_mm;
913aa464ba9SNicholas Piggin mmdrop_lazy_tlb(mm);
914bf2c59fcSPeter Zijlstra return 0;
915bf2c59fcSPeter Zijlstra }
916bf2c59fcSPeter Zijlstra
9172e1a3483SThomas Gleixner /*
9182e1a3483SThomas Gleixner * Hotplug state machine related functions
9192e1a3483SThomas Gleixner */
9202e1a3483SThomas Gleixner
921453e4108SVincent Donnefort /*
922453e4108SVincent Donnefort * Get the next state to run. Empty ones will be skipped. Returns true if a
923453e4108SVincent Donnefort * state must be run.
924453e4108SVincent Donnefort *
925453e4108SVincent Donnefort * st->state will be modified ahead of time, to match state_to_run, as if it
926453e4108SVincent Donnefort * has already ran.
927453e4108SVincent Donnefort */
cpuhp_next_state(bool bringup,enum cpuhp_state * state_to_run,struct cpuhp_cpu_state * st,enum cpuhp_state target)928453e4108SVincent Donnefort static bool cpuhp_next_state(bool bringup,
929453e4108SVincent Donnefort enum cpuhp_state *state_to_run,
930453e4108SVincent Donnefort struct cpuhp_cpu_state *st,
931453e4108SVincent Donnefort enum cpuhp_state target)
9322e1a3483SThomas Gleixner {
933453e4108SVincent Donnefort do {
934453e4108SVincent Donnefort if (bringup) {
935453e4108SVincent Donnefort if (st->state >= target)
936453e4108SVincent Donnefort return false;
937453e4108SVincent Donnefort
938453e4108SVincent Donnefort *state_to_run = ++st->state;
939453e4108SVincent Donnefort } else {
940453e4108SVincent Donnefort if (st->state <= target)
941453e4108SVincent Donnefort return false;
942453e4108SVincent Donnefort
943453e4108SVincent Donnefort *state_to_run = st->state--;
944453e4108SVincent Donnefort }
945453e4108SVincent Donnefort
946453e4108SVincent Donnefort if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
947453e4108SVincent Donnefort break;
948453e4108SVincent Donnefort } while (true);
949453e4108SVincent Donnefort
950453e4108SVincent Donnefort return true;
951453e4108SVincent Donnefort }
952453e4108SVincent Donnefort
__cpuhp_invoke_callback_range(bool bringup,unsigned int cpu,struct cpuhp_cpu_state * st,enum cpuhp_state target,bool nofail)9536f855b39SVincent Donnefort static int __cpuhp_invoke_callback_range(bool bringup,
9546f855b39SVincent Donnefort unsigned int cpu,
9556f855b39SVincent Donnefort struct cpuhp_cpu_state *st,
9566f855b39SVincent Donnefort enum cpuhp_state target,
9576f855b39SVincent Donnefort bool nofail)
9586f855b39SVincent Donnefort {
9596f855b39SVincent Donnefort enum cpuhp_state state;
9606f855b39SVincent Donnefort int ret = 0;
9616f855b39SVincent Donnefort
9626f855b39SVincent Donnefort while (cpuhp_next_state(bringup, &state, st, target)) {
9636f855b39SVincent Donnefort int err;
9646f855b39SVincent Donnefort
9656f855b39SVincent Donnefort err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
9666f855b39SVincent Donnefort if (!err)
9676f855b39SVincent Donnefort continue;
9686f855b39SVincent Donnefort
9696f855b39SVincent Donnefort if (nofail) {
9706f855b39SVincent Donnefort pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
9716f855b39SVincent Donnefort cpu, bringup ? "UP" : "DOWN",
9726f855b39SVincent Donnefort cpuhp_get_step(st->state)->name,
9736f855b39SVincent Donnefort st->state, err);
9746f855b39SVincent Donnefort ret = -1;
9756f855b39SVincent Donnefort } else {
9766f855b39SVincent Donnefort ret = err;
9776f855b39SVincent Donnefort break;
9786f855b39SVincent Donnefort }
9796f855b39SVincent Donnefort }
9806f855b39SVincent Donnefort
9816f855b39SVincent Donnefort return ret;
9826f855b39SVincent Donnefort }
9836f855b39SVincent Donnefort
cpuhp_invoke_callback_range(bool bringup,unsigned int cpu,struct cpuhp_cpu_state * st,enum cpuhp_state target)9846f855b39SVincent Donnefort static inline int cpuhp_invoke_callback_range(bool bringup,
985453e4108SVincent Donnefort unsigned int cpu,
986453e4108SVincent Donnefort struct cpuhp_cpu_state *st,
987453e4108SVincent Donnefort enum cpuhp_state target)
988453e4108SVincent Donnefort {
9896f855b39SVincent Donnefort return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
990453e4108SVincent Donnefort }
991453e4108SVincent Donnefort
cpuhp_invoke_callback_range_nofail(bool bringup,unsigned int cpu,struct cpuhp_cpu_state * st,enum cpuhp_state target)9926f855b39SVincent Donnefort static inline void cpuhp_invoke_callback_range_nofail(bool bringup,
9936f855b39SVincent Donnefort unsigned int cpu,
9946f855b39SVincent Donnefort struct cpuhp_cpu_state *st,
9956f855b39SVincent Donnefort enum cpuhp_state target)
9966f855b39SVincent Donnefort {
9976f855b39SVincent Donnefort __cpuhp_invoke_callback_range(bringup, cpu, st, target, true);
9982e1a3483SThomas Gleixner }
9992e1a3483SThomas Gleixner
can_rollback_cpu(struct cpuhp_cpu_state * st)1000206b9235SThomas Gleixner static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
1001206b9235SThomas Gleixner {
1002206b9235SThomas Gleixner if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
1003206b9235SThomas Gleixner return true;
1004206b9235SThomas Gleixner /*
1005206b9235SThomas Gleixner * When CPU hotplug is disabled, then taking the CPU down is not
1006206b9235SThomas Gleixner * possible because takedown_cpu() and the architecture and
1007206b9235SThomas Gleixner * subsystem specific mechanisms are not available. So the CPU
1008206b9235SThomas Gleixner * which would be completely unplugged again needs to stay around
1009206b9235SThomas Gleixner * in the current state.
1010206b9235SThomas Gleixner */
1011206b9235SThomas Gleixner return st->state <= CPUHP_BRINGUP_CPU;
1012206b9235SThomas Gleixner }
1013206b9235SThomas Gleixner
cpuhp_up_callbacks(unsigned int cpu,struct cpuhp_cpu_state * st,enum cpuhp_state target)10142e1a3483SThomas Gleixner static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
1015a724632cSThomas Gleixner enum cpuhp_state target)
10162e1a3483SThomas Gleixner {
10172e1a3483SThomas Gleixner enum cpuhp_state prev_state = st->state;
10182e1a3483SThomas Gleixner int ret = 0;
10192e1a3483SThomas Gleixner
1020453e4108SVincent Donnefort ret = cpuhp_invoke_callback_range(true, cpu, st, target);
10212e1a3483SThomas Gleixner if (ret) {
1022ebca71a8SDongli Zhang pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",
1023ebca71a8SDongli Zhang ret, cpu, cpuhp_get_step(st->state)->name,
1024ebca71a8SDongli Zhang st->state);
1025ebca71a8SDongli Zhang
1026b7ba6d8dSSteven Price cpuhp_reset_state(cpu, st, prev_state);
1027453e4108SVincent Donnefort if (can_rollback_cpu(st))
1028453e4108SVincent Donnefort WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
1029453e4108SVincent Donnefort prev_state));
10302e1a3483SThomas Gleixner }
10312e1a3483SThomas Gleixner return ret;
10322e1a3483SThomas Gleixner }
10332e1a3483SThomas Gleixner
10344cb28cedSThomas Gleixner /*
10354cb28cedSThomas Gleixner * The cpu hotplug threads manage the bringup and teardown of the cpus
10364cb28cedSThomas Gleixner */
cpuhp_should_run(unsigned int cpu)10374cb28cedSThomas Gleixner static int cpuhp_should_run(unsigned int cpu)
10384cb28cedSThomas Gleixner {
10394cb28cedSThomas Gleixner struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
10404cb28cedSThomas Gleixner
10414cb28cedSThomas Gleixner return st->should_run;
10424cb28cedSThomas Gleixner }
10434cb28cedSThomas Gleixner
10444cb28cedSThomas Gleixner /*
10454cb28cedSThomas Gleixner * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
10464cb28cedSThomas Gleixner * callbacks when a state gets [un]installed at runtime.
10474dddfb5fSPeter Zijlstra *
10484dddfb5fSPeter Zijlstra * Each invocation of this function by the smpboot thread does a single AP
10494dddfb5fSPeter Zijlstra * state callback.
10504dddfb5fSPeter Zijlstra *
10514dddfb5fSPeter Zijlstra * It has 3 modes of operation:
10524dddfb5fSPeter Zijlstra * - single: runs st->cb_state
10534dddfb5fSPeter Zijlstra * - up: runs ++st->state, while st->state < st->target
10544dddfb5fSPeter Zijlstra * - down: runs st->state--, while st->state > st->target
10554dddfb5fSPeter Zijlstra *
10564dddfb5fSPeter Zijlstra * When complete or on error, should_run is cleared and the completion is fired.
10574cb28cedSThomas Gleixner */
cpuhp_thread_fun(unsigned int cpu)10584cb28cedSThomas Gleixner static void cpuhp_thread_fun(unsigned int cpu)
10594cb28cedSThomas Gleixner {
10604cb28cedSThomas Gleixner struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
10614dddfb5fSPeter Zijlstra bool bringup = st->bringup;
10624dddfb5fSPeter Zijlstra enum cpuhp_state state;
10634cb28cedSThomas Gleixner
1064f8b7530aSNeeraj Upadhyay if (WARN_ON_ONCE(!st->should_run))
1065f8b7530aSNeeraj Upadhyay return;
1066f8b7530aSNeeraj Upadhyay
10674cb28cedSThomas Gleixner /*
10684dddfb5fSPeter Zijlstra * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
10694dddfb5fSPeter Zijlstra * that if we see ->should_run we also see the rest of the state.
10704cb28cedSThomas Gleixner */
10714cb28cedSThomas Gleixner smp_mb();
10724dddfb5fSPeter Zijlstra
1073cb92173dSPeter Zijlstra /*
1074cb92173dSPeter Zijlstra * The BP holds the hotplug lock, but we're now running on the AP,
1075cb92173dSPeter Zijlstra * ensure that anybody asserting the lock is held, will actually find
1076cb92173dSPeter Zijlstra * it so.
1077cb92173dSPeter Zijlstra */
1078cb92173dSPeter Zijlstra lockdep_acquire_cpus_lock();
10795f4b55e1SPeter Zijlstra cpuhp_lock_acquire(bringup);
10804dddfb5fSPeter Zijlstra
1081a724632cSThomas Gleixner if (st->single) {
10824dddfb5fSPeter Zijlstra state = st->cb_state;
10834dddfb5fSPeter Zijlstra st->should_run = false;
10844dddfb5fSPeter Zijlstra } else {
1085453e4108SVincent Donnefort st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
1086453e4108SVincent Donnefort if (!st->should_run)
1087453e4108SVincent Donnefort goto end;
10884dddfb5fSPeter Zijlstra }
10894dddfb5fSPeter Zijlstra
10904dddfb5fSPeter Zijlstra WARN_ON_ONCE(!cpuhp_is_ap_state(state));
10914dddfb5fSPeter Zijlstra
10924dddfb5fSPeter Zijlstra if (cpuhp_is_atomic_state(state)) {
10934cb28cedSThomas Gleixner local_irq_disable();
10944dddfb5fSPeter Zijlstra st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
10954cb28cedSThomas Gleixner local_irq_enable();
10963b9d6da6SSebastian Andrzej Siewior
10974dddfb5fSPeter Zijlstra /*
10984dddfb5fSPeter Zijlstra * STARTING/DYING must not fail!
10994dddfb5fSPeter Zijlstra */
11004dddfb5fSPeter Zijlstra WARN_ON_ONCE(st->result);
11014cb28cedSThomas Gleixner } else {
11024dddfb5fSPeter Zijlstra st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
11034cb28cedSThomas Gleixner }
11044dddfb5fSPeter Zijlstra
11054dddfb5fSPeter Zijlstra if (st->result) {
11064dddfb5fSPeter Zijlstra /*
11074dddfb5fSPeter Zijlstra * If we fail on a rollback, we're up a creek without no
11084dddfb5fSPeter Zijlstra * paddle, no way forward, no way back. We loose, thanks for
11094dddfb5fSPeter Zijlstra * playing.
11104dddfb5fSPeter Zijlstra */
11114dddfb5fSPeter Zijlstra WARN_ON_ONCE(st->rollback);
11124dddfb5fSPeter Zijlstra st->should_run = false;
11134dddfb5fSPeter Zijlstra }
11144dddfb5fSPeter Zijlstra
1115453e4108SVincent Donnefort end:
11165f4b55e1SPeter Zijlstra cpuhp_lock_release(bringup);
1117cb92173dSPeter Zijlstra lockdep_release_cpus_lock();
11184dddfb5fSPeter Zijlstra
11194dddfb5fSPeter Zijlstra if (!st->should_run)
11205ebe7742SPeter Zijlstra complete_ap_thread(st, bringup);
11214cb28cedSThomas Gleixner }
11224cb28cedSThomas Gleixner
11234cb28cedSThomas Gleixner /* Invoke a single callback on a remote cpu */
1124a724632cSThomas Gleixner static int
cpuhp_invoke_ap_callback(int cpu,enum cpuhp_state state,bool bringup,struct hlist_node * node)1125cf392d10SThomas Gleixner cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
1126cf392d10SThomas Gleixner struct hlist_node *node)
11274cb28cedSThomas Gleixner {
11284cb28cedSThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
11294dddfb5fSPeter Zijlstra int ret;
11304cb28cedSThomas Gleixner
11314cb28cedSThomas Gleixner if (!cpu_online(cpu))
11324cb28cedSThomas Gleixner return 0;
11334cb28cedSThomas Gleixner
11345f4b55e1SPeter Zijlstra cpuhp_lock_acquire(false);
11355f4b55e1SPeter Zijlstra cpuhp_lock_release(false);
11365f4b55e1SPeter Zijlstra
11375f4b55e1SPeter Zijlstra cpuhp_lock_acquire(true);
11385f4b55e1SPeter Zijlstra cpuhp_lock_release(true);
113949dfe2a6SThomas Gleixner
11406a4e2451SThomas Gleixner /*
11416a4e2451SThomas Gleixner * If we are up and running, use the hotplug thread. For early calls
11426a4e2451SThomas Gleixner * we invoke the thread function directly.
11436a4e2451SThomas Gleixner */
11446a4e2451SThomas Gleixner if (!st->thread)
114596abb968SPeter Zijlstra return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
11466a4e2451SThomas Gleixner
11474dddfb5fSPeter Zijlstra st->rollback = false;
11484dddfb5fSPeter Zijlstra st->last = NULL;
11494dddfb5fSPeter Zijlstra
11504dddfb5fSPeter Zijlstra st->node = node;
11514dddfb5fSPeter Zijlstra st->bringup = bringup;
11524cb28cedSThomas Gleixner st->cb_state = state;
1153a724632cSThomas Gleixner st->single = true;
11544dddfb5fSPeter Zijlstra
11554dddfb5fSPeter Zijlstra __cpuhp_kick_ap(st);
1156a724632cSThomas Gleixner
11574cb28cedSThomas Gleixner /*
11584dddfb5fSPeter Zijlstra * If we failed and did a partial, do a rollback.
11594cb28cedSThomas Gleixner */
11604dddfb5fSPeter Zijlstra if ((ret = st->result) && st->last) {
11614dddfb5fSPeter Zijlstra st->rollback = true;
11624dddfb5fSPeter Zijlstra st->bringup = !bringup;
11634dddfb5fSPeter Zijlstra
11644dddfb5fSPeter Zijlstra __cpuhp_kick_ap(st);
11654cb28cedSThomas Gleixner }
11664cb28cedSThomas Gleixner
11671f7c70d6SThomas Gleixner /*
11681f7c70d6SThomas Gleixner * Clean up the leftovers so the next hotplug operation wont use stale
11691f7c70d6SThomas Gleixner * data.
11701f7c70d6SThomas Gleixner */
11711f7c70d6SThomas Gleixner st->node = st->last = NULL;
11724dddfb5fSPeter Zijlstra return ret;
11731cf4f629SThomas Gleixner }
11741cf4f629SThomas Gleixner
cpuhp_kick_ap_work(unsigned int cpu)11751cf4f629SThomas Gleixner static int cpuhp_kick_ap_work(unsigned int cpu)
11761cf4f629SThomas Gleixner {
11771cf4f629SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
11784dddfb5fSPeter Zijlstra enum cpuhp_state prev_state = st->state;
11794dddfb5fSPeter Zijlstra int ret;
11801cf4f629SThomas Gleixner
11815f4b55e1SPeter Zijlstra cpuhp_lock_acquire(false);
11825f4b55e1SPeter Zijlstra cpuhp_lock_release(false);
11835f4b55e1SPeter Zijlstra
11845f4b55e1SPeter Zijlstra cpuhp_lock_acquire(true);
11855f4b55e1SPeter Zijlstra cpuhp_lock_release(true);
11864dddfb5fSPeter Zijlstra
11874dddfb5fSPeter Zijlstra trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
1188b7ba6d8dSSteven Price ret = cpuhp_kick_ap(cpu, st, st->target);
11894dddfb5fSPeter Zijlstra trace_cpuhp_exit(cpu, st->state, prev_state, ret);
11904dddfb5fSPeter Zijlstra
11914dddfb5fSPeter Zijlstra return ret;
11924cb28cedSThomas Gleixner }
11934cb28cedSThomas Gleixner
11944cb28cedSThomas Gleixner static struct smp_hotplug_thread cpuhp_threads = {
11954cb28cedSThomas Gleixner .store = &cpuhp_state.thread,
11964cb28cedSThomas Gleixner .thread_should_run = cpuhp_should_run,
11974cb28cedSThomas Gleixner .thread_fn = cpuhp_thread_fun,
11984cb28cedSThomas Gleixner .thread_comm = "cpuhp/%u",
11994cb28cedSThomas Gleixner .selfparking = true,
12004cb28cedSThomas Gleixner };
12014cb28cedSThomas Gleixner
cpuhp_init_state(void)1202d308077eSSteven Price static __init void cpuhp_init_state(void)
1203d308077eSSteven Price {
1204d308077eSSteven Price struct cpuhp_cpu_state *st;
1205d308077eSSteven Price int cpu;
1206d308077eSSteven Price
1207d308077eSSteven Price for_each_possible_cpu(cpu) {
1208d308077eSSteven Price st = per_cpu_ptr(&cpuhp_state, cpu);
1209d308077eSSteven Price init_completion(&st->done_up);
1210d308077eSSteven Price init_completion(&st->done_down);
1211d308077eSSteven Price }
1212d308077eSSteven Price }
1213d308077eSSteven Price
cpuhp_threads_init(void)12144cb28cedSThomas Gleixner void __init cpuhp_threads_init(void)
12154cb28cedSThomas Gleixner {
1216d308077eSSteven Price cpuhp_init_state();
12174cb28cedSThomas Gleixner BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
12184cb28cedSThomas Gleixner kthread_unpark(this_cpu_read(cpuhp_state.thread));
12194cb28cedSThomas Gleixner }
12204cb28cedSThomas Gleixner
1221777c6e0dSMichal Hocko #ifdef CONFIG_HOTPLUG_CPU
12228ff00399SNicholas Piggin #ifndef arch_clear_mm_cpumask_cpu
12238ff00399SNicholas Piggin #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
12248ff00399SNicholas Piggin #endif
12258ff00399SNicholas Piggin
1226e4cc2f87SAnton Vorontsov /**
1227e4cc2f87SAnton Vorontsov * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
1228e4cc2f87SAnton Vorontsov * @cpu: a CPU id
1229e4cc2f87SAnton Vorontsov *
1230e4cc2f87SAnton Vorontsov * This function walks all processes, finds a valid mm struct for each one and
1231e4cc2f87SAnton Vorontsov * then clears a corresponding bit in mm's cpumask. While this all sounds
1232e4cc2f87SAnton Vorontsov * trivial, there are various non-obvious corner cases, which this function
1233e4cc2f87SAnton Vorontsov * tries to solve in a safe manner.
1234e4cc2f87SAnton Vorontsov *
1235e4cc2f87SAnton Vorontsov * Also note that the function uses a somewhat relaxed locking scheme, so it may
1236e4cc2f87SAnton Vorontsov * be called only for an already offlined CPU.
1237e4cc2f87SAnton Vorontsov */
clear_tasks_mm_cpumask(int cpu)1238cb79295eSAnton Vorontsov void clear_tasks_mm_cpumask(int cpu)
1239cb79295eSAnton Vorontsov {
1240cb79295eSAnton Vorontsov struct task_struct *p;
1241cb79295eSAnton Vorontsov
1242cb79295eSAnton Vorontsov /*
1243cb79295eSAnton Vorontsov * This function is called after the cpu is taken down and marked
1244cb79295eSAnton Vorontsov * offline, so its not like new tasks will ever get this cpu set in
1245cb79295eSAnton Vorontsov * their mm mask. -- Peter Zijlstra
1246cb79295eSAnton Vorontsov * Thus, we may use rcu_read_lock() here, instead of grabbing
1247cb79295eSAnton Vorontsov * full-fledged tasklist_lock.
1248cb79295eSAnton Vorontsov */
1249e4cc2f87SAnton Vorontsov WARN_ON(cpu_online(cpu));
1250cb79295eSAnton Vorontsov rcu_read_lock();
1251cb79295eSAnton Vorontsov for_each_process(p) {
1252cb79295eSAnton Vorontsov struct task_struct *t;
1253cb79295eSAnton Vorontsov
1254e4cc2f87SAnton Vorontsov /*
1255e4cc2f87SAnton Vorontsov * Main thread might exit, but other threads may still have
1256e4cc2f87SAnton Vorontsov * a valid mm. Find one.
1257e4cc2f87SAnton Vorontsov */
1258cb79295eSAnton Vorontsov t = find_lock_task_mm(p);
1259cb79295eSAnton Vorontsov if (!t)
1260cb79295eSAnton Vorontsov continue;
12618ff00399SNicholas Piggin arch_clear_mm_cpumask_cpu(cpu, t->mm);
1262cb79295eSAnton Vorontsov task_unlock(t);
1263cb79295eSAnton Vorontsov }
1264cb79295eSAnton Vorontsov rcu_read_unlock();
1265cb79295eSAnton Vorontsov }
1266cb79295eSAnton Vorontsov
12671da177e4SLinus Torvalds /* Take this CPU down. */
take_cpu_down(void * _param)126871cf5aeeSMathias Krause static int take_cpu_down(void *_param)
12691da177e4SLinus Torvalds {
12704baa0afcSThomas Gleixner struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
12714baa0afcSThomas Gleixner enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
1272090e77c3SThomas Gleixner int err, cpu = smp_processor_id();
12731da177e4SLinus Torvalds
12741da177e4SLinus Torvalds /* Ensure this CPU doesn't handle any more interrupts. */
12751da177e4SLinus Torvalds err = __cpu_disable();
12761da177e4SLinus Torvalds if (err < 0)
1277f3705136SZwane Mwaikambo return err;
1278f3705136SZwane Mwaikambo
1279a724632cSThomas Gleixner /*
1280453e4108SVincent Donnefort * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
1281453e4108SVincent Donnefort * down, that the current state is CPUHP_TEARDOWN_CPU - 1.
1282a724632cSThomas Gleixner */
1283453e4108SVincent Donnefort WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));
1284453e4108SVincent Donnefort
1285724a8688SPeter Zijlstra /*
12866f855b39SVincent Donnefort * Invoke the former CPU_DYING callbacks. DYING must not fail!
1287724a8688SPeter Zijlstra */
12886f855b39SVincent Donnefort cpuhp_invoke_callback_range_nofail(false, cpu, st, target);
12894baa0afcSThomas Gleixner
129014e568e7SThomas Gleixner /* Park the stopper thread */
1291090e77c3SThomas Gleixner stop_machine_park(cpu);
1292f3705136SZwane Mwaikambo return 0;
12931da177e4SLinus Torvalds }
12941da177e4SLinus Torvalds
takedown_cpu(unsigned int cpu)129598458172SThomas Gleixner static int takedown_cpu(unsigned int cpu)
12961da177e4SLinus Torvalds {
1297e69aab13SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
129898458172SThomas Gleixner int err;
12991da177e4SLinus Torvalds
13002a58c527SThomas Gleixner /* Park the smpboot threads */
130113070833SYuan ZhaoXiong kthread_park(st->thread);
13021cf4f629SThomas Gleixner
13036acce3efSPeter Zijlstra /*
1304a8994181SThomas Gleixner * Prevent irq alloc/free while the dying cpu reorganizes the
1305a8994181SThomas Gleixner * interrupt affinities.
1306a8994181SThomas Gleixner */
1307a8994181SThomas Gleixner irq_lock_sparse();
1308a8994181SThomas Gleixner
1309a8994181SThomas Gleixner /*
13106acce3efSPeter Zijlstra * So now all preempt/rcu users must observe !cpu_active().
13116acce3efSPeter Zijlstra */
1312210e2133SSebastian Andrzej Siewior err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
131304321587SRusty Russell if (err) {
13143b9d6da6SSebastian Andrzej Siewior /* CPU refused to die */
1315a8994181SThomas Gleixner irq_unlock_sparse();
13163b9d6da6SSebastian Andrzej Siewior /* Unpark the hotplug thread so we can rollback there */
131713070833SYuan ZhaoXiong kthread_unpark(st->thread);
131898458172SThomas Gleixner return err;
13191da177e4SLinus Torvalds }
132004321587SRusty Russell BUG_ON(cpu_online(cpu));
13211da177e4SLinus Torvalds
132248c5ccaeSPeter Zijlstra /*
13235b1ead68SBrendan Jackman * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
13245b1ead68SBrendan Jackman * all runnable tasks from the CPU, there's only the idle task left now
132548c5ccaeSPeter Zijlstra * that the migration thread is done doing the stop_machine thing.
132651a96c77SPeter Zijlstra *
132751a96c77SPeter Zijlstra * Wait for the stop thread to go away.
132848c5ccaeSPeter Zijlstra */
13295ebe7742SPeter Zijlstra wait_for_ap_thread(st, false);
1330e69aab13SThomas Gleixner BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
13311da177e4SLinus Torvalds
1332a8994181SThomas Gleixner /* Interrupts are moved away from the dying cpu, reenable alloc/free */
1333a8994181SThomas Gleixner irq_unlock_sparse();
1334a8994181SThomas Gleixner
1335345527b1SPreeti U Murthy hotplug_cpu__broadcast_tick_pull(cpu);
13361da177e4SLinus Torvalds /* This actually kills the CPU. */
13371da177e4SLinus Torvalds __cpu_die(cpu);
13381da177e4SLinus Torvalds
13396f062123SThomas Gleixner cpuhp_bp_sync_dead(cpu);
13406f062123SThomas Gleixner
1341a49b116dSThomas Gleixner tick_cleanup_dead_cpu(cpu);
1342a28ab03bSFrederic Weisbecker
1343a28ab03bSFrederic Weisbecker /*
1344a28ab03bSFrederic Weisbecker * Callbacks must be re-integrated right away to the RCU state machine.
1345a28ab03bSFrederic Weisbecker * Otherwise an RCU callback could block a further teardown function
1346a28ab03bSFrederic Weisbecker * waiting for its completion.
1347a28ab03bSFrederic Weisbecker */
1348a58163d8SPaul E. McKenney rcutree_migrate_callbacks(cpu);
1349a28ab03bSFrederic Weisbecker
135098458172SThomas Gleixner return 0;
135198458172SThomas Gleixner }
13521da177e4SLinus Torvalds
cpuhp_complete_idle_dead(void * arg)135371f87b2fSThomas Gleixner static void cpuhp_complete_idle_dead(void *arg)
135471f87b2fSThomas Gleixner {
135571f87b2fSThomas Gleixner struct cpuhp_cpu_state *st = arg;
135671f87b2fSThomas Gleixner
13575ebe7742SPeter Zijlstra complete_ap_thread(st, false);
135871f87b2fSThomas Gleixner }
135971f87b2fSThomas Gleixner
cpuhp_report_idle_dead(void)1360e69aab13SThomas Gleixner void cpuhp_report_idle_dead(void)
1361e69aab13SThomas Gleixner {
1362e69aab13SThomas Gleixner struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1363e69aab13SThomas Gleixner
1364e69aab13SThomas Gleixner BUG_ON(st->state != CPUHP_AP_OFFLINE);
1365500f8f9bSFrederic Weisbecker tick_assert_timekeeping_handover();
1366448e9f34SFrederic Weisbecker rcutree_report_cpu_dead();
136771f87b2fSThomas Gleixner st->state = CPUHP_AP_IDLE_DEAD;
136871f87b2fSThomas Gleixner /*
1369448e9f34SFrederic Weisbecker * We cannot call complete after rcutree_report_cpu_dead() so we delegate it
137071f87b2fSThomas Gleixner * to an online cpu.
137171f87b2fSThomas Gleixner */
137271f87b2fSThomas Gleixner smp_call_function_single(cpumask_first(cpu_online_mask),
137371f87b2fSThomas Gleixner cpuhp_complete_idle_dead, st, 0);
1374e69aab13SThomas Gleixner }
1375e69aab13SThomas Gleixner
cpuhp_down_callbacks(unsigned int cpu,struct cpuhp_cpu_state * st,enum cpuhp_state target)13764dddfb5fSPeter Zijlstra static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
13774dddfb5fSPeter Zijlstra enum cpuhp_state target)
13784dddfb5fSPeter Zijlstra {
13794dddfb5fSPeter Zijlstra enum cpuhp_state prev_state = st->state;
13804dddfb5fSPeter Zijlstra int ret = 0;
13814dddfb5fSPeter Zijlstra
1382453e4108SVincent Donnefort ret = cpuhp_invoke_callback_range(false, cpu, st, target);
13834dddfb5fSPeter Zijlstra if (ret) {
1384ebca71a8SDongli Zhang pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",
1385ebca71a8SDongli Zhang ret, cpu, cpuhp_get_step(st->state)->name,
1386ebca71a8SDongli Zhang st->state);
1387453e4108SVincent Donnefort
1388b7ba6d8dSSteven Price cpuhp_reset_state(cpu, st, prev_state);
1389453e4108SVincent Donnefort
139069fa6eb7SThomas Gleixner if (st->state < prev_state)
1391453e4108SVincent Donnefort WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
1392453e4108SVincent Donnefort prev_state));
13934dddfb5fSPeter Zijlstra }
1394453e4108SVincent Donnefort
13954dddfb5fSPeter Zijlstra return ret;
13964dddfb5fSPeter Zijlstra }
1397cff7d378SThomas Gleixner
139898458172SThomas Gleixner /* Requires cpu_add_remove_lock to be held */
_cpu_down(unsigned int cpu,int tasks_frozen,enum cpuhp_state target)1399af1f4045SThomas Gleixner static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
1400af1f4045SThomas Gleixner enum cpuhp_state target)
140198458172SThomas Gleixner {
1402cff7d378SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1403cff7d378SThomas Gleixner int prev_state, ret = 0;
140498458172SThomas Gleixner
140598458172SThomas Gleixner if (num_online_cpus() == 1)
140698458172SThomas Gleixner return -EBUSY;
140798458172SThomas Gleixner
1408757c989bSThomas Gleixner if (!cpu_present(cpu))
140998458172SThomas Gleixner return -EINVAL;
141098458172SThomas Gleixner
14118f553c49SThomas Gleixner cpus_write_lock();
141298458172SThomas Gleixner
141398458172SThomas Gleixner cpuhp_tasks_frozen = tasks_frozen;
141498458172SThomas Gleixner
1415b7ba6d8dSSteven Price prev_state = cpuhp_set_state(cpu, st, target);
14161cf4f629SThomas Gleixner /*
14171cf4f629SThomas Gleixner * If the current CPU state is in the range of the AP hotplug thread,
14181cf4f629SThomas Gleixner * then we need to kick the thread.
14191cf4f629SThomas Gleixner */
14208df3e07eSThomas Gleixner if (st->state > CPUHP_TEARDOWN_CPU) {
14214dddfb5fSPeter Zijlstra st->target = max((int)target, CPUHP_TEARDOWN_CPU);
14221cf4f629SThomas Gleixner ret = cpuhp_kick_ap_work(cpu);
14231cf4f629SThomas Gleixner /*
14241cf4f629SThomas Gleixner * The AP side has done the error rollback already. Just
14251cf4f629SThomas Gleixner * return the error code..
14261cf4f629SThomas Gleixner */
14271cf4f629SThomas Gleixner if (ret)
14281cf4f629SThomas Gleixner goto out;
14291cf4f629SThomas Gleixner
14301cf4f629SThomas Gleixner /*
14311cf4f629SThomas Gleixner * We might have stopped still in the range of the AP hotplug
14321cf4f629SThomas Gleixner * thread. Nothing to do anymore.
14331cf4f629SThomas Gleixner */
14348df3e07eSThomas Gleixner if (st->state > CPUHP_TEARDOWN_CPU)
14351cf4f629SThomas Gleixner goto out;
14364dddfb5fSPeter Zijlstra
14374dddfb5fSPeter Zijlstra st->target = target;
14381cf4f629SThomas Gleixner }
14391cf4f629SThomas Gleixner /*
14408df3e07eSThomas Gleixner * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
14411cf4f629SThomas Gleixner * to do the further cleanups.
14421cf4f629SThomas Gleixner */
1443a724632cSThomas Gleixner ret = cpuhp_down_callbacks(cpu, st, target);
144462f25069SVincent Donnefort if (ret && st->state < prev_state) {
144562f25069SVincent Donnefort if (st->state == CPUHP_TEARDOWN_CPU) {
1446b7ba6d8dSSteven Price cpuhp_reset_state(cpu, st, prev_state);
14474dddfb5fSPeter Zijlstra __cpuhp_kick_ap(st);
144862f25069SVincent Donnefort } else {
144962f25069SVincent Donnefort WARN(1, "DEAD callback error for CPU%d", cpu);
145062f25069SVincent Donnefort }
14513b9d6da6SSebastian Andrzej Siewior }
145298458172SThomas Gleixner
14531cf4f629SThomas Gleixner out:
14548f553c49SThomas Gleixner cpus_write_unlock();
1455941154bdSThomas Gleixner /*
1456941154bdSThomas Gleixner * Do post unplug cleanup. This is still protected against
1457941154bdSThomas Gleixner * concurrent CPU hotplug via cpu_add_remove_lock.
1458941154bdSThomas Gleixner */
1459941154bdSThomas Gleixner lockup_detector_cleanup();
1460a74cfffbSThomas Gleixner arch_smt_update();
1461cff7d378SThomas Gleixner return ret;
1462e3920fb4SRafael J. Wysocki }
1463e3920fb4SRafael J. Wysocki
14642b8272ffSThomas Gleixner struct cpu_down_work {
14652b8272ffSThomas Gleixner unsigned int cpu;
14662b8272ffSThomas Gleixner enum cpuhp_state target;
14672b8272ffSThomas Gleixner };
14682b8272ffSThomas Gleixner
__cpu_down_maps_locked(void * arg)14692b8272ffSThomas Gleixner static long __cpu_down_maps_locked(void *arg)
14702b8272ffSThomas Gleixner {
14712b8272ffSThomas Gleixner struct cpu_down_work *work = arg;
14722b8272ffSThomas Gleixner
14732b8272ffSThomas Gleixner return _cpu_down(work->cpu, 0, work->target);
14742b8272ffSThomas Gleixner }
14752b8272ffSThomas Gleixner
cpu_down_maps_locked(unsigned int cpu,enum cpuhp_state target)1476cc1fe215SThomas Gleixner static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1477cc1fe215SThomas Gleixner {
14782b8272ffSThomas Gleixner struct cpu_down_work work = { .cpu = cpu, .target = target, };
14792b8272ffSThomas Gleixner
1480bae1a962SKuppuswamy Sathyanarayanan /*
1481bae1a962SKuppuswamy Sathyanarayanan * If the platform does not support hotplug, report it explicitly to
1482bae1a962SKuppuswamy Sathyanarayanan * differentiate it from a transient offlining failure.
1483bae1a962SKuppuswamy Sathyanarayanan */
148466e48e49SKirill A. Shutemov if (cpu_hotplug_offline_disabled)
1485bae1a962SKuppuswamy Sathyanarayanan return -EOPNOTSUPP;
1486cc1fe215SThomas Gleixner if (cpu_hotplug_disabled)
1487cc1fe215SThomas Gleixner return -EBUSY;
14882b8272ffSThomas Gleixner
14892b8272ffSThomas Gleixner /*
14902b8272ffSThomas Gleixner * Ensure that the control task does not run on the to be offlined
14912b8272ffSThomas Gleixner * CPU to prevent a deadlock against cfs_b->period_timer.
149238685e2aSRan Xiaokai * Also keep at least one housekeeping cpu onlined to avoid generating
149338685e2aSRan Xiaokai * an empty sched_domain span.
14942b8272ffSThomas Gleixner */
149538685e2aSRan Xiaokai for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
149638685e2aSRan Xiaokai if (cpu != work.cpu)
14972b8272ffSThomas Gleixner return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
1498cc1fe215SThomas Gleixner }
149938685e2aSRan Xiaokai return -EBUSY;
150038685e2aSRan Xiaokai }
1501cc1fe215SThomas Gleixner
cpu_down(unsigned int cpu,enum cpuhp_state target)150233c3736eSQais Yousef static int cpu_down(unsigned int cpu, enum cpuhp_state target)
1503e3920fb4SRafael J. Wysocki {
15049ea09af3SHeiko Carstens int err;
1505e3920fb4SRafael J. Wysocki
1506d221938cSGautham R Shenoy cpu_maps_update_begin();
1507cc1fe215SThomas Gleixner err = cpu_down_maps_locked(cpu, target);
1508d221938cSGautham R Shenoy cpu_maps_update_done();
15091da177e4SLinus Torvalds return err;
15101da177e4SLinus Torvalds }
15114dddfb5fSPeter Zijlstra
151233c3736eSQais Yousef /**
151333c3736eSQais Yousef * cpu_device_down - Bring down a cpu device
151433c3736eSQais Yousef * @dev: Pointer to the cpu device to offline
151533c3736eSQais Yousef *
151633c3736eSQais Yousef * This function is meant to be used by device core cpu subsystem only.
151733c3736eSQais Yousef *
151833c3736eSQais Yousef * Other subsystems should use remove_cpu() instead.
151911bc021dSRandy Dunlap *
152011bc021dSRandy Dunlap * Return: %0 on success or a negative errno code
152133c3736eSQais Yousef */
cpu_device_down(struct device * dev)152233c3736eSQais Yousef int cpu_device_down(struct device *dev)
1523af1f4045SThomas Gleixner {
152433c3736eSQais Yousef return cpu_down(dev->id, CPUHP_OFFLINE);
1525af1f4045SThomas Gleixner }
15264dddfb5fSPeter Zijlstra
remove_cpu(unsigned int cpu)152793ef1429SQais Yousef int remove_cpu(unsigned int cpu)
152893ef1429SQais Yousef {
152993ef1429SQais Yousef int ret;
153093ef1429SQais Yousef
153193ef1429SQais Yousef lock_device_hotplug();
153293ef1429SQais Yousef ret = device_offline(get_cpu_device(cpu));
153393ef1429SQais Yousef unlock_device_hotplug();
153493ef1429SQais Yousef
153593ef1429SQais Yousef return ret;
153693ef1429SQais Yousef }
153793ef1429SQais Yousef EXPORT_SYMBOL_GPL(remove_cpu);
153893ef1429SQais Yousef
smp_shutdown_nonboot_cpus(unsigned int primary_cpu)15390441a559SQais Yousef void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
15400441a559SQais Yousef {
15410441a559SQais Yousef unsigned int cpu;
15420441a559SQais Yousef int error;
15430441a559SQais Yousef
15440441a559SQais Yousef cpu_maps_update_begin();
15450441a559SQais Yousef
15460441a559SQais Yousef /*
15470441a559SQais Yousef * Make certain the cpu I'm about to reboot on is online.
15480441a559SQais Yousef *
15490441a559SQais Yousef * This is inline to what migrate_to_reboot_cpu() already do.
15500441a559SQais Yousef */
15510441a559SQais Yousef if (!cpu_online(primary_cpu))
15520441a559SQais Yousef primary_cpu = cpumask_first(cpu_online_mask);
15530441a559SQais Yousef
15540441a559SQais Yousef for_each_online_cpu(cpu) {
15550441a559SQais Yousef if (cpu == primary_cpu)
15560441a559SQais Yousef continue;
15570441a559SQais Yousef
15580441a559SQais Yousef error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
15590441a559SQais Yousef if (error) {
15600441a559SQais Yousef pr_err("Failed to offline CPU%d - error=%d",
15610441a559SQais Yousef cpu, error);
15620441a559SQais Yousef break;
15630441a559SQais Yousef }
15640441a559SQais Yousef }
15650441a559SQais Yousef
15660441a559SQais Yousef /*
15670441a559SQais Yousef * Ensure all but the reboot CPU are offline.
15680441a559SQais Yousef */
15690441a559SQais Yousef BUG_ON(num_online_cpus() > 1);
15700441a559SQais Yousef
15710441a559SQais Yousef /*
15720441a559SQais Yousef * Make sure the CPUs won't be enabled by someone else after this
15730441a559SQais Yousef * point. Kexec will reboot to a new kernel shortly resetting
15740441a559SQais Yousef * everything along the way.
15750441a559SQais Yousef */
15760441a559SQais Yousef cpu_hotplug_disabled++;
15770441a559SQais Yousef
15780441a559SQais Yousef cpu_maps_update_done();
15790441a559SQais Yousef }
15804dddfb5fSPeter Zijlstra
15814dddfb5fSPeter Zijlstra #else
15824dddfb5fSPeter Zijlstra #define takedown_cpu NULL
15831da177e4SLinus Torvalds #endif /*CONFIG_HOTPLUG_CPU*/
15841da177e4SLinus Torvalds
15854baa0afcSThomas Gleixner /**
1586ee1e714bSThomas Gleixner * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
15874baa0afcSThomas Gleixner * @cpu: cpu that just started
15884baa0afcSThomas Gleixner *
15894baa0afcSThomas Gleixner * It must be called by the arch code on the new cpu, before the new cpu
15904baa0afcSThomas Gleixner * enables interrupts and before the "boot" cpu returns from __cpu_up().
15914baa0afcSThomas Gleixner */
notify_cpu_starting(unsigned int cpu)15924baa0afcSThomas Gleixner void notify_cpu_starting(unsigned int cpu)
15934baa0afcSThomas Gleixner {
15944baa0afcSThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
15954baa0afcSThomas Gleixner enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
15964baa0afcSThomas Gleixner
1597448e9f34SFrederic Weisbecker rcutree_report_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
1598e797bda3SThomas Gleixner cpumask_set_cpu(cpu, &cpus_booted_once_mask);
1599453e4108SVincent Donnefort
1600724a8688SPeter Zijlstra /*
1601724a8688SPeter Zijlstra * STARTING must not fail!
1602724a8688SPeter Zijlstra */
16036f855b39SVincent Donnefort cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
16044baa0afcSThomas Gleixner }
16054baa0afcSThomas Gleixner
1606949338e3SThomas Gleixner /*
16079cd4f1a4SThomas Gleixner * Called from the idle task. Wake up the controlling task which brings the
160845178ac0SPeter Zijlstra * hotplug thread of the upcoming CPU up and then delegates the rest of the
160945178ac0SPeter Zijlstra * online bringup to the hotplug thread.
1610949338e3SThomas Gleixner */
cpuhp_online_idle(enum cpuhp_state state)16118df3e07eSThomas Gleixner void cpuhp_online_idle(enum cpuhp_state state)
1612949338e3SThomas Gleixner {
16138df3e07eSThomas Gleixner struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
16148df3e07eSThomas Gleixner
16158df3e07eSThomas Gleixner /* Happens for the boot cpu */
16168df3e07eSThomas Gleixner if (state != CPUHP_AP_ONLINE_IDLE)
16178df3e07eSThomas Gleixner return;
16188df3e07eSThomas Gleixner
16196f062123SThomas Gleixner cpuhp_ap_update_sync_state(SYNC_STATE_ONLINE);
16206f062123SThomas Gleixner
162145178ac0SPeter Zijlstra /*
16226f062123SThomas Gleixner * Unpark the stopper thread before we start the idle loop (and start
162345178ac0SPeter Zijlstra * scheduling); this ensures the stopper task is always available.
162445178ac0SPeter Zijlstra */
162545178ac0SPeter Zijlstra stop_machine_unpark(smp_processor_id());
162645178ac0SPeter Zijlstra
16278df3e07eSThomas Gleixner st->state = CPUHP_AP_ONLINE_IDLE;
16285ebe7742SPeter Zijlstra complete_ap_thread(st, true);
1629949338e3SThomas Gleixner }
1630949338e3SThomas Gleixner
1631e3920fb4SRafael J. Wysocki /* Requires cpu_add_remove_lock to be held */
_cpu_up(unsigned int cpu,int tasks_frozen,enum cpuhp_state target)1632af1f4045SThomas Gleixner static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
16331da177e4SLinus Torvalds {
1634cff7d378SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
16353bb5d2eeSSuresh Siddha struct task_struct *idle;
16362e1a3483SThomas Gleixner int ret = 0;
16371da177e4SLinus Torvalds
16388f553c49SThomas Gleixner cpus_write_lock();
163938498a67SThomas Gleixner
1640757c989bSThomas Gleixner if (!cpu_present(cpu)) {
16415e5041f3SYasuaki Ishimatsu ret = -EINVAL;
16425e5041f3SYasuaki Ishimatsu goto out;
16435e5041f3SYasuaki Ishimatsu }
16445e5041f3SYasuaki Ishimatsu
1645757c989bSThomas Gleixner /*
164633c3736eSQais Yousef * The caller of cpu_up() might have raced with another
164733c3736eSQais Yousef * caller. Nothing to do.
1648757c989bSThomas Gleixner */
1649757c989bSThomas Gleixner if (st->state >= target)
1650757c989bSThomas Gleixner goto out;
1651757c989bSThomas Gleixner
1652757c989bSThomas Gleixner if (st->state == CPUHP_OFFLINE) {
1653cff7d378SThomas Gleixner /* Let it fail before we try to bring the cpu up */
16543bb5d2eeSSuresh Siddha idle = idle_thread_get(cpu);
16553bb5d2eeSSuresh Siddha if (IS_ERR(idle)) {
16563bb5d2eeSSuresh Siddha ret = PTR_ERR(idle);
165738498a67SThomas Gleixner goto out;
16583bb5d2eeSSuresh Siddha }
16596d712b9bSDavid Woodhouse
16606d712b9bSDavid Woodhouse /*
16616d712b9bSDavid Woodhouse * Reset stale stack state from the last time this CPU was online.
16626d712b9bSDavid Woodhouse */
16636d712b9bSDavid Woodhouse scs_task_reset(idle);
16646d712b9bSDavid Woodhouse kasan_unpoison_task_stack(idle);
1665757c989bSThomas Gleixner }
166638498a67SThomas Gleixner
1667ba997462SThomas Gleixner cpuhp_tasks_frozen = tasks_frozen;
1668ba997462SThomas Gleixner
1669b7ba6d8dSSteven Price cpuhp_set_state(cpu, st, target);
16701cf4f629SThomas Gleixner /*
16711cf4f629SThomas Gleixner * If the current CPU state is in the range of the AP hotplug thread,
16721cf4f629SThomas Gleixner * then we need to kick the thread once more.
16731cf4f629SThomas Gleixner */
16748df3e07eSThomas Gleixner if (st->state > CPUHP_BRINGUP_CPU) {
16751cf4f629SThomas Gleixner ret = cpuhp_kick_ap_work(cpu);
16761cf4f629SThomas Gleixner /*
16771cf4f629SThomas Gleixner * The AP side has done the error rollback already. Just
16781cf4f629SThomas Gleixner * return the error code..
16791cf4f629SThomas Gleixner */
16801cf4f629SThomas Gleixner if (ret)
16811cf4f629SThomas Gleixner goto out;
16821cf4f629SThomas Gleixner }
16831cf4f629SThomas Gleixner
16841cf4f629SThomas Gleixner /*
16851cf4f629SThomas Gleixner * Try to reach the target state. We max out on the BP at
16868df3e07eSThomas Gleixner * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
16871cf4f629SThomas Gleixner * responsible for bringing it up to the target state.
16881cf4f629SThomas Gleixner */
16898df3e07eSThomas Gleixner target = min((int)target, CPUHP_BRINGUP_CPU);
1690a724632cSThomas Gleixner ret = cpuhp_up_callbacks(cpu, st, target);
169138498a67SThomas Gleixner out:
16928f553c49SThomas Gleixner cpus_write_unlock();
1693a74cfffbSThomas Gleixner arch_smt_update();
16941da177e4SLinus Torvalds return ret;
16951da177e4SLinus Torvalds }
1696e3920fb4SRafael J. Wysocki
cpu_up(unsigned int cpu,enum cpuhp_state target)169733c3736eSQais Yousef static int cpu_up(unsigned int cpu, enum cpuhp_state target)
1698e3920fb4SRafael J. Wysocki {
1699e3920fb4SRafael J. Wysocki int err = 0;
1700cf23422bSminskey guo
1701e0b582ecSRusty Russell if (!cpu_possible(cpu)) {
170284117da5SFabian Frederick pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
170384117da5SFabian Frederick cpu);
170473e753a5SKAMEZAWA Hiroyuki return -EINVAL;
170573e753a5SKAMEZAWA Hiroyuki }
1706e3920fb4SRafael J. Wysocki
170701b0f197SToshi Kani err = try_online_node(cpu_to_node(cpu));
1708cf23422bSminskey guo if (err)
1709cf23422bSminskey guo return err;
1710cf23422bSminskey guo
1711d221938cSGautham R Shenoy cpu_maps_update_begin();
1712e761b772SMax Krasnyansky
1713e761b772SMax Krasnyansky if (cpu_hotplug_disabled) {
1714e3920fb4SRafael J. Wysocki err = -EBUSY;
1715e761b772SMax Krasnyansky goto out;
1716e761b772SMax Krasnyansky }
1717d91bdd96SThomas Gleixner if (!cpu_bootable(cpu)) {
171805736e4aSThomas Gleixner err = -EPERM;
171905736e4aSThomas Gleixner goto out;
172005736e4aSThomas Gleixner }
1721e761b772SMax Krasnyansky
1722af1f4045SThomas Gleixner err = _cpu_up(cpu, 0, target);
1723e761b772SMax Krasnyansky out:
1724d221938cSGautham R Shenoy cpu_maps_update_done();
1725e3920fb4SRafael J. Wysocki return err;
1726e3920fb4SRafael J. Wysocki }
1727af1f4045SThomas Gleixner
172833c3736eSQais Yousef /**
172933c3736eSQais Yousef * cpu_device_up - Bring up a cpu device
173033c3736eSQais Yousef * @dev: Pointer to the cpu device to online
173133c3736eSQais Yousef *
173233c3736eSQais Yousef * This function is meant to be used by device core cpu subsystem only.
173333c3736eSQais Yousef *
173433c3736eSQais Yousef * Other subsystems should use add_cpu() instead.
173511bc021dSRandy Dunlap *
173611bc021dSRandy Dunlap * Return: %0 on success or a negative errno code
173733c3736eSQais Yousef */
cpu_device_up(struct device * dev)173833c3736eSQais Yousef int cpu_device_up(struct device *dev)
1739af1f4045SThomas Gleixner {
174033c3736eSQais Yousef return cpu_up(dev->id, CPUHP_ONLINE);
1741af1f4045SThomas Gleixner }
1742e3920fb4SRafael J. Wysocki
add_cpu(unsigned int cpu)174393ef1429SQais Yousef int add_cpu(unsigned int cpu)
174493ef1429SQais Yousef {
174593ef1429SQais Yousef int ret;
174693ef1429SQais Yousef
174793ef1429SQais Yousef lock_device_hotplug();
174893ef1429SQais Yousef ret = device_online(get_cpu_device(cpu));
174993ef1429SQais Yousef unlock_device_hotplug();
175093ef1429SQais Yousef
175193ef1429SQais Yousef return ret;
175293ef1429SQais Yousef }
175393ef1429SQais Yousef EXPORT_SYMBOL_GPL(add_cpu);
175493ef1429SQais Yousef
1755d720f986SQais Yousef /**
1756d720f986SQais Yousef * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
1757d720f986SQais Yousef * @sleep_cpu: The cpu we hibernated on and should be brought up.
1758d720f986SQais Yousef *
1759d720f986SQais Yousef * On some architectures like arm64, we can hibernate on any CPU, but on
1760d720f986SQais Yousef * wake up the CPU we hibernated on might be offline as a side effect of
1761d720f986SQais Yousef * using maxcpus= for example.
176211bc021dSRandy Dunlap *
176311bc021dSRandy Dunlap * Return: %0 on success or a negative errno code
1764d720f986SQais Yousef */
bringup_hibernate_cpu(unsigned int sleep_cpu)1765d720f986SQais Yousef int bringup_hibernate_cpu(unsigned int sleep_cpu)
1766d720f986SQais Yousef {
1767d720f986SQais Yousef int ret;
1768d720f986SQais Yousef
1769d720f986SQais Yousef if (!cpu_online(sleep_cpu)) {
1770d720f986SQais Yousef pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
177133c3736eSQais Yousef ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
1772d720f986SQais Yousef if (ret) {
1773d720f986SQais Yousef pr_err("Failed to bring hibernate-CPU up!\n");
1774d720f986SQais Yousef return ret;
1775d720f986SQais Yousef }
1776d720f986SQais Yousef }
1777d720f986SQais Yousef return 0;
1778d720f986SQais Yousef }
1779d720f986SQais Yousef
cpuhp_bringup_mask(const struct cpumask * mask,unsigned int ncpus,enum cpuhp_state target)178018415f33SThomas Gleixner static void __init cpuhp_bringup_mask(const struct cpumask *mask, unsigned int ncpus,
178118415f33SThomas Gleixner enum cpuhp_state target)
1782b99a2659SQais Yousef {
1783b99a2659SQais Yousef unsigned int cpu;
1784b99a2659SQais Yousef
178518415f33SThomas Gleixner for_each_cpu(cpu, mask) {
178618415f33SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
178718415f33SThomas Gleixner
178818415f33SThomas Gleixner if (cpu_up(cpu, target) && can_rollback_cpu(st)) {
178918415f33SThomas Gleixner /*
179018415f33SThomas Gleixner * If this failed then cpu_up() might have only
179118415f33SThomas Gleixner * rolled back to CPUHP_BP_KICK_AP for the final
179218415f33SThomas Gleixner * online. Clean it up. NOOP if already rolled back.
179318415f33SThomas Gleixner */
179418415f33SThomas Gleixner WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, CPUHP_OFFLINE));
1795b99a2659SQais Yousef }
179606c6796eSThomas Gleixner
179706c6796eSThomas Gleixner if (!--ncpus)
179806c6796eSThomas Gleixner break;
1799b99a2659SQais Yousef }
180018415f33SThomas Gleixner }
180118415f33SThomas Gleixner
180218415f33SThomas Gleixner #ifdef CONFIG_HOTPLUG_PARALLEL
180318415f33SThomas Gleixner static bool __cpuhp_parallel_bringup __ro_after_init = true;
180418415f33SThomas Gleixner
parallel_bringup_parse_param(char * arg)180518415f33SThomas Gleixner static int __init parallel_bringup_parse_param(char *arg)
180618415f33SThomas Gleixner {
180718415f33SThomas Gleixner return kstrtobool(arg, &__cpuhp_parallel_bringup);
180818415f33SThomas Gleixner }
180918415f33SThomas Gleixner early_param("cpuhp.parallel", parallel_bringup_parse_param);
181018415f33SThomas Gleixner
18117a4dcb4aSLaurent Dufour #ifdef CONFIG_HOTPLUG_SMT
cpuhp_smt_aware(void)18127a4dcb4aSLaurent Dufour static inline bool cpuhp_smt_aware(void)
181391b4a7dbSLaurent Dufour {
18147a4dcb4aSLaurent Dufour return cpu_smt_max_threads > 1;
18157a4dcb4aSLaurent Dufour }
18167a4dcb4aSLaurent Dufour
cpuhp_get_primary_thread_mask(void)18177a4dcb4aSLaurent Dufour static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
18187a4dcb4aSLaurent Dufour {
18197a4dcb4aSLaurent Dufour return cpu_primary_thread_mask;
18207a4dcb4aSLaurent Dufour }
182118415f33SThomas Gleixner #else
cpuhp_smt_aware(void)182218415f33SThomas Gleixner static inline bool cpuhp_smt_aware(void)
182318415f33SThomas Gleixner {
182418415f33SThomas Gleixner return false;
182518415f33SThomas Gleixner }
cpuhp_get_primary_thread_mask(void)182618415f33SThomas Gleixner static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
182718415f33SThomas Gleixner {
182818415f33SThomas Gleixner return cpu_none_mask;
182918415f33SThomas Gleixner }
183018415f33SThomas Gleixner #endif
183118415f33SThomas Gleixner
arch_cpuhp_init_parallel_bringup(void)183218415f33SThomas Gleixner bool __weak arch_cpuhp_init_parallel_bringup(void)
183318415f33SThomas Gleixner {
183418415f33SThomas Gleixner return true;
183518415f33SThomas Gleixner }
183618415f33SThomas Gleixner
183718415f33SThomas Gleixner /*
183818415f33SThomas Gleixner * On architectures which have enabled parallel bringup this invokes all BP
183918415f33SThomas Gleixner * prepare states for each of the to be onlined APs first. The last state
184018415f33SThomas Gleixner * sends the startup IPI to the APs. The APs proceed through the low level
184118415f33SThomas Gleixner * bringup code in parallel and then wait for the control CPU to release
184218415f33SThomas Gleixner * them one by one for the final onlining procedure.
184318415f33SThomas Gleixner *
184418415f33SThomas Gleixner * This avoids waiting for each AP to respond to the startup IPI in
184518415f33SThomas Gleixner * CPUHP_BRINGUP_CPU.
184618415f33SThomas Gleixner */
cpuhp_bringup_cpus_parallel(unsigned int ncpus)184718415f33SThomas Gleixner static bool __init cpuhp_bringup_cpus_parallel(unsigned int ncpus)
184818415f33SThomas Gleixner {
184918415f33SThomas Gleixner const struct cpumask *mask = cpu_present_mask;
185018415f33SThomas Gleixner
185118415f33SThomas Gleixner if (__cpuhp_parallel_bringup)
185218415f33SThomas Gleixner __cpuhp_parallel_bringup = arch_cpuhp_init_parallel_bringup();
185318415f33SThomas Gleixner if (!__cpuhp_parallel_bringup)
185418415f33SThomas Gleixner return false;
185518415f33SThomas Gleixner
185618415f33SThomas Gleixner if (cpuhp_smt_aware()) {
185718415f33SThomas Gleixner const struct cpumask *pmask = cpuhp_get_primary_thread_mask();
185818415f33SThomas Gleixner static struct cpumask tmp_mask __initdata;
185918415f33SThomas Gleixner
186018415f33SThomas Gleixner /*
186118415f33SThomas Gleixner * X86 requires to prevent that SMT siblings stopped while
186218415f33SThomas Gleixner * the primary thread does a microcode update for various
186318415f33SThomas Gleixner * reasons. Bring the primary threads up first.
186418415f33SThomas Gleixner */
186518415f33SThomas Gleixner cpumask_and(&tmp_mask, mask, pmask);
186618415f33SThomas Gleixner cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_BP_KICK_AP);
186718415f33SThomas Gleixner cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_ONLINE);
186818415f33SThomas Gleixner /* Account for the online CPUs */
186918415f33SThomas Gleixner ncpus -= num_online_cpus();
18704c8a4985SIngo Molnar if (!ncpus)
187118415f33SThomas Gleixner return true;
18726ef8eb51SHuacai Chen /* Create the mask for secondary CPUs */
18736ef8eb51SHuacai Chen cpumask_andnot(&tmp_mask, mask, pmask);
18746ef8eb51SHuacai Chen mask = &tmp_mask;
187518415f33SThomas Gleixner }
18764c8a4985SIngo Molnar
187718415f33SThomas Gleixner /* Bring the not-yet started CPUs up */
187818415f33SThomas Gleixner cpuhp_bringup_mask(mask, ncpus, CPUHP_BP_KICK_AP);
187918415f33SThomas Gleixner cpuhp_bringup_mask(mask, ncpus, CPUHP_ONLINE);
18804c8a4985SIngo Molnar return true;
188118415f33SThomas Gleixner }
1882e3920fb4SRafael J. Wysocki #else
cpuhp_bringup_cpus_parallel(unsigned int ncpus)1883f3de4be9SRafael J. Wysocki static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return false; }
1884e0b582ecSRusty Russell #endif /* CONFIG_HOTPLUG_PARALLEL */
1885e3920fb4SRafael J. Wysocki
bringup_nonboot_cpus(unsigned int max_cpus)1886fb7fb84aSQais Yousef void __init bringup_nonboot_cpus(unsigned int max_cpus)
1887e3920fb4SRafael J. Wysocki {
1888d391e552SJames Morse if (!max_cpus)
1889e3920fb4SRafael J. Wysocki return;
1890d221938cSGautham R Shenoy
18919ca12ac0SNicholas Piggin /* Try parallel bringup optimization if enabled */
18929ca12ac0SNicholas Piggin if (cpuhp_bringup_cpus_parallel(max_cpus))
189304d4e665SFrederic Weisbecker return;
189404d4e665SFrederic Weisbecker
18959ca12ac0SNicholas Piggin /* Full per CPU serialized bringup */
1896d391e552SJames Morse cpuhp_bringup_mask(cpu_present_mask, max_cpus, CPUHP_ONLINE);
1897d391e552SJames Morse }
18989ca12ac0SNicholas Piggin
18999ca12ac0SNicholas Piggin #ifdef CONFIG_PM_SLEEP_SMP
19009ee349adSXiaotian Feng static cpumask_var_t frozen_cpus;
19019ee349adSXiaotian Feng
freeze_secondary_cpus(int primary)1902e3920fb4SRafael J. Wysocki int freeze_secondary_cpus(int primary)
1903e3920fb4SRafael J. Wysocki {
1904e0b582ecSRusty Russell int cpu, error = 0;
19056ad4c188SPeter Zijlstra
190684117da5SFabian Frederick cpu_maps_update_begin();
1907fde78e46SStanislav Spassov if (primary == -1) {
1908fde78e46SStanislav Spassov primary = cpumask_first(cpu_online_mask);
1909e3920fb4SRafael J. Wysocki if (!housekeeping_cpu(primary, HK_TYPE_TIMER))
1910a66d955eSPavankumar Kondeti primary = housekeeping_any_cpu(HK_TYPE_TIMER);
1911fb7fb84aSQais Yousef } else {
1912a66d955eSPavankumar Kondeti if (!cpu_online(primary))
1913a66d955eSPavankumar Kondeti primary = cpumask_first(cpu_online_mask);
1914a66d955eSPavankumar Kondeti }
1915a66d955eSPavankumar Kondeti
1916a66d955eSPavankumar Kondeti /*
1917bb3632c6STodd E Brandt * We take down all of the non-boot CPUs in one shot to avoid races
1918af1f4045SThomas Gleixner * with the userspace trying to use the CPU hotplug at the same time
1919bb3632c6STodd E Brandt */
1920feae3203SMike Travis cpumask_clear(frozen_cpus);
1921e0b582ecSRusty Russell
1922feae3203SMike Travis pr_info("Disabling non-boot CPUs ...\n");
192384117da5SFabian Frederick for (cpu = nr_cpu_ids - 1; cpu >= 0; cpu--) {
1924e3920fb4SRafael J. Wysocki if (!cpu_online(cpu) || cpu == primary)
1925e3920fb4SRafael J. Wysocki continue;
1926e3920fb4SRafael J. Wysocki
192786886e55SJoseph Cihula if (pm_wakeup_pending()) {
192889af7ba5SVitaly Kuznetsov pr_info("Wakeup pending. Abort CPU freeze\n");
1929e3920fb4SRafael J. Wysocki error = -EBUSY;
193089af7ba5SVitaly Kuznetsov break;
193184117da5SFabian Frederick }
193289af7ba5SVitaly Kuznetsov
193389af7ba5SVitaly Kuznetsov trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
193489af7ba5SVitaly Kuznetsov error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
193556555855SQais Yousef trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
193656555855SQais Yousef if (!error)
193789af7ba5SVitaly Kuznetsov cpumask_set_cpu(cpu, frozen_cpus);
193889af7ba5SVitaly Kuznetsov else {
193989af7ba5SVitaly Kuznetsov pr_err("Error taking CPU%d down: %d\n", cpu, error);
1940d221938cSGautham R Shenoy break;
1941e3920fb4SRafael J. Wysocki }
1942e3920fb4SRafael J. Wysocki }
1943e3920fb4SRafael J. Wysocki
194456555855SQais Yousef if (!error)
1945d0af9eedSSuresh Siddha BUG_ON(num_online_cpus() > 1);
1946d0af9eedSSuresh Siddha else
1947d0af9eedSSuresh Siddha pr_err("Non-boot CPUs are not disabled\n");
194856555855SQais Yousef
1949d0af9eedSSuresh Siddha /*
1950d0af9eedSSuresh Siddha * Make sure the CPUs won't be enabled by someone else. We need to do
1951d0af9eedSSuresh Siddha * this even in case of failure as all freeze_secondary_cpus() users are
195256555855SQais Yousef * supposed to do thaw_secondary_cpus() on the failure path.
1953e3920fb4SRafael J. Wysocki */
1954e3920fb4SRafael J. Wysocki cpu_hotplug_disabled++;
1955e3920fb4SRafael J. Wysocki
1956e3920fb4SRafael J. Wysocki cpu_maps_update_done();
1957d221938cSGautham R Shenoy return error;
195801b41159SLianwei Wang }
1959e0b582ecSRusty Russell
arch_thaw_secondary_cpus_begin(void)19601d64b9cbSRafael J. Wysocki void __weak arch_thaw_secondary_cpus_begin(void)
1961e3920fb4SRafael J. Wysocki {
196284117da5SFabian Frederick }
1963d0af9eedSSuresh Siddha
arch_thaw_secondary_cpus_end(void)196456555855SQais Yousef void __weak arch_thaw_secondary_cpus_end(void)
1965d0af9eedSSuresh Siddha {
1966e0b582ecSRusty Russell }
1967bb3632c6STodd E Brandt
thaw_secondary_cpus(void)1968af1f4045SThomas Gleixner void thaw_secondary_cpus(void)
1969bb3632c6STodd E Brandt {
1970e3920fb4SRafael J. Wysocki int cpu, error;
197184117da5SFabian Frederick
1972e3920fb4SRafael J. Wysocki /* Allow everyone to use the CPU hotplug again */
1973e3920fb4SRafael J. Wysocki cpu_maps_update_begin();
197484117da5SFabian Frederick __cpu_hotplug_enable();
1975e3920fb4SRafael J. Wysocki if (cpumask_empty(frozen_cpus))
1976d0af9eedSSuresh Siddha goto out;
197756555855SQais Yousef
1978d0af9eedSSuresh Siddha pr_info("Enabling non-boot CPUs ...\n");
1979e0b582ecSRusty Russell
19801d64b9cbSRafael J. Wysocki arch_thaw_secondary_cpus_begin();
1981d221938cSGautham R Shenoy
1982e3920fb4SRafael J. Wysocki for_each_cpu(cpu, frozen_cpus) {
1983e0b582ecSRusty Russell trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1984d7268a31SFenghua Yu error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1985e0b582ecSRusty Russell trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1986e0b582ecSRusty Russell if (!error) {
1987e0b582ecSRusty Russell pr_info("CPU%d is up\n", cpu);
1988e0b582ecSRusty Russell continue;
1989e0b582ecSRusty Russell }
1990e0b582ecSRusty Russell pr_warn("Error taking CPU%d up: %d\n", cpu, error);
199179cfbdfaSSrivatsa S. Bhat }
199279cfbdfaSSrivatsa S. Bhat
199379cfbdfaSSrivatsa S. Bhat arch_thaw_secondary_cpus_end();
199479cfbdfaSSrivatsa S. Bhat
199579cfbdfaSSrivatsa S. Bhat cpumask_clear(frozen_cpus);
199679cfbdfaSSrivatsa S. Bhat out:
199779cfbdfaSSrivatsa S. Bhat cpu_maps_update_done();
199879cfbdfaSSrivatsa S. Bhat }
199979cfbdfaSSrivatsa S. Bhat
alloc_frozen_cpus(void)200079cfbdfaSSrivatsa S. Bhat static int __init alloc_frozen_cpus(void)
200179cfbdfaSSrivatsa S. Bhat {
200279cfbdfaSSrivatsa S. Bhat if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
200379cfbdfaSSrivatsa S. Bhat return -ENOMEM;
200479cfbdfaSSrivatsa S. Bhat return 0;
200579cfbdfaSSrivatsa S. Bhat }
200679cfbdfaSSrivatsa S. Bhat core_initcall(alloc_frozen_cpus);
200779cfbdfaSSrivatsa S. Bhat
200879cfbdfaSSrivatsa S. Bhat /*
200979cfbdfaSSrivatsa S. Bhat * When callbacks for CPU hotplug notifications are being executed, we must
201079cfbdfaSSrivatsa S. Bhat * ensure that the state of the system with respect to the tasks being frozen
201116e53dbfSSrivatsa S. Bhat * or not, as reported by the notification, remains unchanged *throughout the
201279cfbdfaSSrivatsa S. Bhat * duration* of the execution of the callbacks.
201379cfbdfaSSrivatsa S. Bhat * Hence we need to prevent the freezer from racing with regular CPU hotplug.
201479cfbdfaSSrivatsa S. Bhat *
201579cfbdfaSSrivatsa S. Bhat * This synchronization is implemented by mutually excluding regular CPU
201616e53dbfSSrivatsa S. Bhat * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
201779cfbdfaSSrivatsa S. Bhat * Hibernate notifications.
201879cfbdfaSSrivatsa S. Bhat */
201979cfbdfaSSrivatsa S. Bhat static int
cpu_hotplug_pm_callback(struct notifier_block * nb,unsigned long action,void * ptr)202079cfbdfaSSrivatsa S. Bhat cpu_hotplug_pm_callback(struct notifier_block *nb,
202179cfbdfaSSrivatsa S. Bhat unsigned long action, void *ptr)
202279cfbdfaSSrivatsa S. Bhat {
202379cfbdfaSSrivatsa S. Bhat switch (action) {
202479cfbdfaSSrivatsa S. Bhat
202579cfbdfaSSrivatsa S. Bhat case PM_SUSPEND_PREPARE:
202679cfbdfaSSrivatsa S. Bhat case PM_HIBERNATION_PREPARE:
2027d7268a31SFenghua Yu cpu_hotplug_disable();
202879cfbdfaSSrivatsa S. Bhat break;
20296e32d479SFenghua Yu
20306e32d479SFenghua Yu case PM_POST_SUSPEND:
20316e32d479SFenghua Yu case PM_POST_HIBERNATION:
20326e32d479SFenghua Yu cpu_hotplug_enable();
20336e32d479SFenghua Yu break;
203479cfbdfaSSrivatsa S. Bhat
203579cfbdfaSSrivatsa S. Bhat default:
203679cfbdfaSSrivatsa S. Bhat return NOTIFY_DONE;
203779cfbdfaSSrivatsa S. Bhat }
203879cfbdfaSSrivatsa S. Bhat
2039f3de4be9SRafael J. Wysocki return NOTIFY_OK;
204068f4f1ecSMax Krasnyansky }
20418ce371f9SPeter Zijlstra
20428ce371f9SPeter Zijlstra
cpu_hotplug_pm_sync_init(void)204368f4f1ecSMax Krasnyansky static int __init cpu_hotplug_pm_sync_init(void)
2044b8d317d1SMike Travis {
2045cff7d378SThomas Gleixner /*
204617a2f1ceSLai Jiangshan * cpu_hotplug_pm_callback has higher priority than x86
2047cff7d378SThomas Gleixner * bsp_pm_callback which depends on cpu_hotplug_pm_callback
2048cff7d378SThomas Gleixner * to disable cpu hotplug to avoid cpu hotplug race.
20493c1627e9SThomas Gleixner */
20503c1627e9SThomas Gleixner pm_notifier(cpu_hotplug_pm_callback, 0);
2051cff7d378SThomas Gleixner return 0;
2052cff7d378SThomas Gleixner }
2053cff7d378SThomas Gleixner core_initcall(cpu_hotplug_pm_sync_init);
2054677f6646SThomas Gleixner
20553c1627e9SThomas Gleixner #endif /* CONFIG_PM_SLEEP_SMP */
20563c1627e9SThomas Gleixner
2057757c989bSThomas Gleixner int __boot_cpu_id;
2058cff7d378SThomas Gleixner
205900e16c3dSThomas Gleixner #endif /* CONFIG_SMP */
20603c1627e9SThomas Gleixner
20613c1627e9SThomas Gleixner /* Boot processor state steps */
20623c1627e9SThomas Gleixner static struct cpuhp_step cpuhp_hp_states[] = {
206300e16c3dSThomas Gleixner [CPUHP_OFFLINE] = {
20643191dd5aSJason A. Donenfeld .name = "offline",
20653191dd5aSJason A. Donenfeld .startup.single = NULL,
20663191dd5aSJason A. Donenfeld .teardown.single = NULL,
20673191dd5aSJason A. Donenfeld },
20683191dd5aSJason A. Donenfeld #ifdef CONFIG_SMP
20697ee681b2SThomas Gleixner [CPUHP_CREATE_THREADS]= {
20703c1627e9SThomas Gleixner .name = "threads:prepare",
20713c1627e9SThomas Gleixner .startup.single = smpboot_create_threads,
20723c1627e9SThomas Gleixner .teardown.single = NULL,
20737ee681b2SThomas Gleixner .cant_stop = true,
207427590dc1SThomas Gleixner },
20753c1627e9SThomas Gleixner [CPUHP_PERF_PREPARE] = {
20763c1627e9SThomas Gleixner .name = "perf:prepare",
20775c0930ccSThomas Gleixner .startup.single = perf_event_init_cpu,
207827590dc1SThomas Gleixner .teardown.single = perf_event_exit_cpu,
207931487f83SRichard Weinberger },
2080677f6646SThomas Gleixner [CPUHP_RANDOM_PREPARE] = {
20813c1627e9SThomas Gleixner .name = "random:prepare",
20823c1627e9SThomas Gleixner .startup.single = random_prepare_cpu,
208331487f83SRichard Weinberger .teardown.single = NULL,
2084e6d4989aSRichard Weinberger },
2085e6d4989aSRichard Weinberger [CPUHP_WORKQUEUE_PREP] = {
2086e6d4989aSRichard Weinberger .name = "workqueue:prepare",
2087e6d4989aSRichard Weinberger .startup.single = workqueue_prepare_cpu,
2088e6d4989aSRichard Weinberger .teardown.single = NULL,
20894df83742SThomas Gleixner },
2090677f6646SThomas Gleixner [CPUHP_HRTIMERS_PREPARE] = {
20913c1627e9SThomas Gleixner .name = "hrtimers:prepare",
20923c1627e9SThomas Gleixner .startup.single = hrtimers_prepare_cpu,
20934df83742SThomas Gleixner .teardown.single = NULL,
2094d10ef6f9SThomas Gleixner },
20954fae16dfSRichard Cochran [CPUHP_SMPCFD_PREPARE] = {
20964fae16dfSRichard Cochran .name = "smpcfd:prepare",
20974fae16dfSRichard Cochran .startup.single = smpcfd_prepare_cpu,
20984fae16dfSRichard Cochran .teardown.single = smpcfd_dead_cpu,
209926456f87SThomas Gleixner },
2100d018031fSMukesh Ojha [CPUHP_RELAY_PREPARE] = {
210126456f87SThomas Gleixner .name = "relay:prepare",
21023c1627e9SThomas Gleixner .startup.single = relay_prepare_cpu,
21034fae16dfSRichard Cochran .teardown.single = NULL,
2104a631be92SThomas Gleixner },
2105a631be92SThomas Gleixner [CPUHP_RCUTREE_PREP] = {
2106a631be92SThomas Gleixner .name = "RCU/tree:prepare",
2107a631be92SThomas Gleixner .startup.single = rcutree_prepare_cpu,
2108a631be92SThomas Gleixner .teardown.single = rcutree_dead_cpu,
2109a631be92SThomas Gleixner },
2110a631be92SThomas Gleixner /*
2111a631be92SThomas Gleixner * On the tear-down path, timers_dead_cpu() must be invoked
2112a631be92SThomas Gleixner * before blk_mq_queue_reinit_notify() from notify_dead(),
2113a631be92SThomas Gleixner * otherwise a RCU stall occurs.
2114a631be92SThomas Gleixner */
2115a631be92SThomas Gleixner [CPUHP_TIMERS_PREPARE] = {
2116a631be92SThomas Gleixner .name = "timers:prepare",
2117a631be92SThomas Gleixner .startup.single = timers_prepare_cpu,
2118a631be92SThomas Gleixner .teardown.single = timers_dead_cpu,
2119a631be92SThomas Gleixner },
2120a631be92SThomas Gleixner
2121a631be92SThomas Gleixner #ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
2122a631be92SThomas Gleixner /*
2123a631be92SThomas Gleixner * Kicks the AP alive. AP will wait in cpuhp_ap_sync_alive() until
2124a631be92SThomas Gleixner * the next step will release it.
2125a631be92SThomas Gleixner */
2126a631be92SThomas Gleixner [CPUHP_BP_KICK_AP] = {
2127a631be92SThomas Gleixner .name = "cpu:kick_ap",
2128a631be92SThomas Gleixner .startup.single = cpuhp_kick_ap_alive,
2129cff7d378SThomas Gleixner },
2130cff7d378SThomas Gleixner
21313c1627e9SThomas Gleixner /*
2132bf2c59fcSPeter Zijlstra * Waits for the AP to reach cpuhp_ap_sync_alive() and then
2133757c989bSThomas Gleixner * releases it for the complete bringup.
21344baa0afcSThomas Gleixner */
2135a631be92SThomas Gleixner [CPUHP_BRINGUP_CPU] = {
2136d10ef6f9SThomas Gleixner .name = "cpu:bringup",
2137d10ef6f9SThomas Gleixner .startup.single = cpuhp_bringup_ap,
2138d10ef6f9SThomas Gleixner .teardown.single = finish_cpu,
2139d10ef6f9SThomas Gleixner .cant_stop = true,
2140d10ef6f9SThomas Gleixner },
2141d10ef6f9SThomas Gleixner #else
2142d10ef6f9SThomas Gleixner /*
2143d10ef6f9SThomas Gleixner * All-in-one CPU bringup state which includes the kick alive.
2144d10ef6f9SThomas Gleixner */
2145d10ef6f9SThomas Gleixner [CPUHP_BRINGUP_CPU] = {
2146d10ef6f9SThomas Gleixner .name = "cpu:bringup",
2147d10ef6f9SThomas Gleixner .startup.single = bringup_cpu,
21489cf7243dSThomas Gleixner .teardown.single = finish_cpu,
21499cf7243dSThomas Gleixner .cant_stop = true,
21509cf7243dSThomas Gleixner },
21513c1627e9SThomas Gleixner #endif
21523c1627e9SThomas Gleixner /* Final state before CPU kills itself */
21539cf7243dSThomas Gleixner [CPUHP_AP_IDLE_DEAD] = {
21544df83742SThomas Gleixner .name = "idle:dead",
2155677f6646SThomas Gleixner },
21563c1627e9SThomas Gleixner /*
21573c1627e9SThomas Gleixner * Last state before CPU enters the idle loop to die. Transient state
21584baa0afcSThomas Gleixner * for synchronization.
215946febd37SLai Jiangshan */
216046febd37SLai Jiangshan [CPUHP_AP_OFFLINE] = {
216146febd37SLai Jiangshan .name = "ap:offline",
216246febd37SLai Jiangshan .cant_stop = true,
216346febd37SLai Jiangshan },
21645c0930ccSThomas Gleixner /* First state is scheduler control. Interrupts are disabled */
21655c0930ccSThomas Gleixner [CPUHP_AP_SCHED_STARTING] = {
21665c0930ccSThomas Gleixner .name = "sched:starting",
21675c0930ccSThomas Gleixner .startup.single = sched_cpu_starting,
21685c0930ccSThomas Gleixner .teardown.single = sched_cpu_dying,
21693ad6eb06SFrederic Weisbecker },
21703ad6eb06SFrederic Weisbecker [CPUHP_AP_RCUTREE_DYING] = {
21713ad6eb06SFrederic Weisbecker .name = "RCU/tree:dying",
21723ad6eb06SFrederic Weisbecker .startup.single = NULL,
21733ad6eb06SFrederic Weisbecker .teardown.single = rcutree_dying_cpu,
2174d10ef6f9SThomas Gleixner },
2175d10ef6f9SThomas Gleixner [CPUHP_AP_SMPCFD_DYING] = {
2176d10ef6f9SThomas Gleixner .name = "smpcfd:dying",
2177d10ef6f9SThomas Gleixner .startup.single = NULL,
2178d10ef6f9SThomas Gleixner .teardown.single = smpcfd_dying_cpu,
217917a2f1ceSLai Jiangshan },
21801cf12e08SThomas Gleixner [CPUHP_AP_HRTIMERS_DYING] = {
218117a2f1ceSLai Jiangshan .name = "hrtimers:dying",
218217a2f1ceSLai Jiangshan .startup.single = NULL,
218317a2f1ceSLai Jiangshan .teardown.single = hrtimers_cpu_dying,
218417a2f1ceSLai Jiangshan },
218517a2f1ceSLai Jiangshan [CPUHP_AP_TICK_DYING] = {
218617a2f1ceSLai Jiangshan .name = "tick:dying",
218717a2f1ceSLai Jiangshan .startup.single = NULL,
218817a2f1ceSLai Jiangshan .teardown.single = tick_cpu_dying,
21891cf12e08SThomas Gleixner },
21901cf12e08SThomas Gleixner /* Entry state on starting. Interrupts enabled from here on. Transient
21911cf12e08SThomas Gleixner * state for synchronsization */
21921cf12e08SThomas Gleixner [CPUHP_AP_ONLINE] = {
21931cf12e08SThomas Gleixner .name = "ap:online",
21941cf12e08SThomas Gleixner },
21951cf12e08SThomas Gleixner /*
2196d10ef6f9SThomas Gleixner * Handled on control processor until the plugged processor manages
21971cf4f629SThomas Gleixner * this itself.
2198677f6646SThomas Gleixner */
21993c1627e9SThomas Gleixner [CPUHP_TEARDOWN_CPU] = {
2200c4de6569SThomas Gleixner .name = "cpu:teardown",
22011cf4f629SThomas Gleixner .startup.single = NULL,
2202c5cb83bbSThomas Gleixner .teardown.single = takedown_cpu,
2203c5cb83bbSThomas Gleixner .cant_stop = true,
2204c5cb83bbSThomas Gleixner },
2205c5cb83bbSThomas Gleixner
2206c5cb83bbSThomas Gleixner [CPUHP_AP_SCHED_WAIT_EMPTY] = {
220700e16c3dSThomas Gleixner .name = "sched:waitempty",
22083c1627e9SThomas Gleixner .startup.single = NULL,
22093c1627e9SThomas Gleixner .teardown.single = sched_cpu_wait_empty,
22103c1627e9SThomas Gleixner },
221100e16c3dSThomas Gleixner
22129cf57731SPeter Zijlstra /* Handle smpboot threads park/unpark */
22139cf57731SPeter Zijlstra [CPUHP_AP_SMPBOOT_THREADS] = {
22149cf57731SPeter Zijlstra .name = "smpboot/threads:online",
22159cf57731SPeter Zijlstra .startup.single = smpboot_unpark_threads,
22169cf57731SPeter Zijlstra .teardown.single = smpboot_park_threads,
22177ee681b2SThomas Gleixner },
22183c1627e9SThomas Gleixner [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
22193c1627e9SThomas Gleixner .name = "irq/affinity:online",
22203c1627e9SThomas Gleixner .startup.single = irq_affinity_online_cpu,
22217ee681b2SThomas Gleixner .teardown.single = NULL,
22223191dd5aSJason A. Donenfeld },
22233191dd5aSJason A. Donenfeld [CPUHP_AP_PERF_ONLINE] = {
22243191dd5aSJason A. Donenfeld .name = "perf:online",
22253191dd5aSJason A. Donenfeld .startup.single = perf_event_init_cpu,
22263191dd5aSJason A. Donenfeld .teardown.single = perf_event_exit_cpu,
22274df83742SThomas Gleixner },
2228677f6646SThomas Gleixner [CPUHP_AP_WATCHDOG_ONLINE] = {
22293c1627e9SThomas Gleixner .name = "lockup_detector:online",
22303c1627e9SThomas Gleixner .startup.single = lockup_detector_online_cpu,
22314df83742SThomas Gleixner .teardown.single = lockup_detector_offline_cpu,
22324baa0afcSThomas Gleixner },
2233d10ef6f9SThomas Gleixner [CPUHP_AP_WORKQUEUE_ONLINE] = {
2234d10ef6f9SThomas Gleixner .name = "workqueue:online",
2235d10ef6f9SThomas Gleixner .startup.single = workqueue_online_cpu,
2236d10ef6f9SThomas Gleixner .teardown.single = workqueue_offline_cpu,
2237aaddd7d1SThomas Gleixner },
2238aaddd7d1SThomas Gleixner [CPUHP_AP_RANDOM_ONLINE] = {
2239aaddd7d1SThomas Gleixner .name = "random:online",
2240aaddd7d1SThomas Gleixner .startup.single = random_online_cpu,
22413c1627e9SThomas Gleixner .teardown.single = NULL,
22423c1627e9SThomas Gleixner },
2243aaddd7d1SThomas Gleixner [CPUHP_AP_RCUTREE_ONLINE] = {
2244aaddd7d1SThomas Gleixner .name = "RCU/tree:online",
2245aaddd7d1SThomas Gleixner .startup.single = rcutree_online_cpu,
2246d10ef6f9SThomas Gleixner .teardown.single = rcutree_offline_cpu,
22474baa0afcSThomas Gleixner },
22484baa0afcSThomas Gleixner #endif
22493c1627e9SThomas Gleixner /*
22503c1627e9SThomas Gleixner * The dynamically registered state space is here
22514baa0afcSThomas Gleixner */
22524baa0afcSThomas Gleixner
22534baa0afcSThomas Gleixner #ifdef CONFIG_SMP
22545b7aa87eSThomas Gleixner /* Last state is scheduler control setting the cpu active */
22555b7aa87eSThomas Gleixner [CPUHP_AP_ACTIVE] = {
22565b7aa87eSThomas Gleixner .name = "sched:active",
22575b7aa87eSThomas Gleixner .startup.single = sched_cpu_activate,
22585b7aa87eSThomas Gleixner .teardown.single = sched_cpu_deactivate,
22595b7aa87eSThomas Gleixner },
22605b7aa87eSThomas Gleixner #endif
22615b7aa87eSThomas Gleixner
2262dc280d93SThomas Gleixner /* CPU is fully up and running. */
2263dc280d93SThomas Gleixner [CPUHP_ONLINE] = {
2264dc280d93SThomas Gleixner .name = "online",
2265dc280d93SThomas Gleixner .startup.single = NULL,
2266dc280d93SThomas Gleixner .teardown.single = NULL,
2267dc280d93SThomas Gleixner },
2268dc280d93SThomas Gleixner };
22694205e478SThomas Gleixner
22704205e478SThomas Gleixner /* Sanity check for callbacks */
cpuhp_cb_check(enum cpuhp_state state)2271dc280d93SThomas Gleixner static int cpuhp_cb_check(enum cpuhp_state state)
22724205e478SThomas Gleixner {
22734205e478SThomas Gleixner if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
227417a2f1ceSLai Jiangshan return -EINVAL;
22754205e478SThomas Gleixner return 0;
22764205e478SThomas Gleixner }
22774205e478SThomas Gleixner
227817a2f1ceSLai Jiangshan /*
22794205e478SThomas Gleixner * Returns a free for dynamic slot assignment of the Online state. The states
22804205e478SThomas Gleixner * are protected by the cpuhp_slot_states mutex and an empty slot is identified
22814205e478SThomas Gleixner * by having no name assigned.
22824205e478SThomas Gleixner */
cpuhp_reserve_state(enum cpuhp_state state)22834205e478SThomas Gleixner static int cpuhp_reserve_state(enum cpuhp_state state)
22844205e478SThomas Gleixner {
22854205e478SThomas Gleixner enum cpuhp_state i, end;
22864205e478SThomas Gleixner struct cpuhp_step *step;
2287dc280d93SThomas Gleixner
2288dc280d93SThomas Gleixner switch (state) {
2289dc280d93SThomas Gleixner case CPUHP_AP_ONLINE_DYN:
2290dc280d93SThomas Gleixner step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
2291dc280d93SThomas Gleixner end = CPUHP_AP_ONLINE_DYN_END;
2292dc280d93SThomas Gleixner break;
2293dc280d93SThomas Gleixner case CPUHP_BP_PREPARE_DYN:
22945b7aa87eSThomas Gleixner step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
2295cf392d10SThomas Gleixner end = CPUHP_BP_PREPARE_DYN_END;
2296cf392d10SThomas Gleixner break;
22975b7aa87eSThomas Gleixner default:
22985b7aa87eSThomas Gleixner return -EINVAL;
22995b7aa87eSThomas Gleixner }
2300dc280d93SThomas Gleixner
23015b7aa87eSThomas Gleixner for (i = state; i <= end; i++, step++) {
23020c96b273SEthan Barnes if (!step->name)
23030c96b273SEthan Barnes return i;
23040c96b273SEthan Barnes }
23050c96b273SEthan Barnes WARN(1, "No more dynamic states available for CPU hotplug\n");
23060c96b273SEthan Barnes return -ENOSPC;
23070c96b273SEthan Barnes }
23080c96b273SEthan Barnes
cpuhp_store_callbacks(enum cpuhp_state state,const char * name,int (* startup)(unsigned int cpu),int (* teardown)(unsigned int cpu),bool multi_instance)23090c96b273SEthan Barnes static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
23100c96b273SEthan Barnes int (*startup)(unsigned int cpu),
23110c96b273SEthan Barnes int (*teardown)(unsigned int cpu),
23120c96b273SEthan Barnes bool multi_instance)
2313dc280d93SThomas Gleixner {
2314dc280d93SThomas Gleixner /* (Un)Install the callbacks for further cpu hotplug operations */
2315dc434e05SSebastian Andrzej Siewior struct cpuhp_step *sp;
2316dc280d93SThomas Gleixner int ret = 0;
2317dc280d93SThomas Gleixner
23185b7aa87eSThomas Gleixner /*
2319dc434e05SSebastian Andrzej Siewior * If name is NULL, then the state gets removed.
2320dc434e05SSebastian Andrzej Siewior *
2321dc434e05SSebastian Andrzej Siewior * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
23223c1627e9SThomas Gleixner * the first allocation from these dynamic ranges, so the removal
23233c1627e9SThomas Gleixner * would trigger a new allocation and clear the wrong (already
23245b7aa87eSThomas Gleixner * empty) state, leaving the callbacks of the to be cleared state
2325cf392d10SThomas Gleixner * dangling, which causes wreckage on the next hotplug operation.
2326cf392d10SThomas Gleixner */
2327dc280d93SThomas Gleixner if (name && (state == CPUHP_AP_ONLINE_DYN ||
23285b7aa87eSThomas Gleixner state == CPUHP_BP_PREPARE_DYN)) {
23295b7aa87eSThomas Gleixner ret = cpuhp_reserve_state(state);
23305b7aa87eSThomas Gleixner if (ret < 0)
23315b7aa87eSThomas Gleixner return ret;
23323c1627e9SThomas Gleixner state = ret;
23335b7aa87eSThomas Gleixner }
23345b7aa87eSThomas Gleixner sp = cpuhp_get_step(state);
23355b7aa87eSThomas Gleixner if (name && sp->name)
23365b7aa87eSThomas Gleixner return -EBUSY;
23375b7aa87eSThomas Gleixner
23385b7aa87eSThomas Gleixner sp->startup.single = startup;
2339cf392d10SThomas Gleixner sp->teardown.single = teardown;
2340cf392d10SThomas Gleixner sp->name = name;
23415b7aa87eSThomas Gleixner sp->multi_instance = multi_instance;
2342a724632cSThomas Gleixner INIT_HLIST_HEAD(&sp->list);
23435b7aa87eSThomas Gleixner return ret;
23445b7aa87eSThomas Gleixner }
23454dddfb5fSPeter Zijlstra
cpuhp_get_teardown_cb(enum cpuhp_state state)23464dddfb5fSPeter Zijlstra static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
23474dddfb5fSPeter Zijlstra {
23484dddfb5fSPeter Zijlstra return cpuhp_get_step(state)->teardown.single;
2349453e4108SVincent Donnefort }
23505b7aa87eSThomas Gleixner
23515b7aa87eSThomas Gleixner /*
23525b7aa87eSThomas Gleixner * Call the startup/teardown function for a step either on the AP or
23535b7aa87eSThomas Gleixner * on the current CPU.
23545b7aa87eSThomas Gleixner */
cpuhp_issue_call(int cpu,enum cpuhp_state state,bool bringup,struct hlist_node * node)23551cf4f629SThomas Gleixner static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
23561cf4f629SThomas Gleixner struct hlist_node *node)
2357cf392d10SThomas Gleixner {
23581cf4f629SThomas Gleixner struct cpuhp_step *sp = cpuhp_get_step(state);
235996abb968SPeter Zijlstra int ret;
23601cf4f629SThomas Gleixner
236196abb968SPeter Zijlstra /*
23621cf4f629SThomas Gleixner * If there's nothing to do, we done.
23635b7aa87eSThomas Gleixner * Relies on the union for multi_instance.
23645b7aa87eSThomas Gleixner */
23655b7aa87eSThomas Gleixner if (cpuhp_step_empty(bringup, sp))
23665b7aa87eSThomas Gleixner return 0;
23675b7aa87eSThomas Gleixner /*
23685b7aa87eSThomas Gleixner * The non AP bound callbacks can fail on bringup. On teardown
23695b7aa87eSThomas Gleixner * e.g. module removal we crash for now.
23705b7aa87eSThomas Gleixner */
23715b7aa87eSThomas Gleixner #ifdef CONFIG_SMP
23725b7aa87eSThomas Gleixner if (cpuhp_is_ap_state(state))
2373cf392d10SThomas Gleixner ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
23745b7aa87eSThomas Gleixner else
23755b7aa87eSThomas Gleixner ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
23765b7aa87eSThomas Gleixner #else
23775b7aa87eSThomas Gleixner ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
23785b7aa87eSThomas Gleixner #endif
23795b7aa87eSThomas Gleixner BUG_ON(ret && !bringup);
23805b7aa87eSThomas Gleixner return ret;
23815b7aa87eSThomas Gleixner }
23825b7aa87eSThomas Gleixner
23835b7aa87eSThomas Gleixner /*
23845b7aa87eSThomas Gleixner * Called from __cpuhp_setup_state on a recoverable failure.
23855b7aa87eSThomas Gleixner *
23865b7aa87eSThomas Gleixner * Note: The teardown callbacks for rollback are not allowed to fail!
2387cf392d10SThomas Gleixner */
cpuhp_rollback_install(int failedcpu,enum cpuhp_state state,struct hlist_node * node)23885b7aa87eSThomas Gleixner static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
23895b7aa87eSThomas Gleixner struct hlist_node *node)
23905b7aa87eSThomas Gleixner {
23919805c673SThomas Gleixner int cpu;
23929805c673SThomas Gleixner
2393cf392d10SThomas Gleixner /* Roll back the already executed steps on the other cpus */
2394cf392d10SThomas Gleixner for_each_present_cpu(cpu) {
2395cf392d10SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2396cf392d10SThomas Gleixner int cpustate = st->state;
2397cf392d10SThomas Gleixner
2398cf392d10SThomas Gleixner if (cpu >= failedcpu)
23999805c673SThomas Gleixner break;
24009805c673SThomas Gleixner
2401cf392d10SThomas Gleixner /* Did we invoke the startup call on that cpu ? */
2402cf392d10SThomas Gleixner if (cpustate >= state)
2403cf392d10SThomas Gleixner cpuhp_issue_call(cpu, state, false, node);
2404cf392d10SThomas Gleixner }
2405dc434e05SSebastian Andrzej Siewior }
2406cf392d10SThomas Gleixner
__cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,struct hlist_node * node,bool invoke)24073c1627e9SThomas Gleixner int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
2408cf392d10SThomas Gleixner struct hlist_node *node,
2409cf392d10SThomas Gleixner bool invoke)
2410cf392d10SThomas Gleixner {
2411cf392d10SThomas Gleixner struct cpuhp_step *sp;
2412cf392d10SThomas Gleixner int cpu;
2413cf392d10SThomas Gleixner int ret;
2414cf392d10SThomas Gleixner
2415cf392d10SThomas Gleixner lockdep_assert_cpus_held();
2416cf392d10SThomas Gleixner
2417cf392d10SThomas Gleixner sp = cpuhp_get_step(state);
2418cf392d10SThomas Gleixner if (sp->multi_instance == false)
2419cf392d10SThomas Gleixner return -EINVAL;
2420cf392d10SThomas Gleixner
2421cf392d10SThomas Gleixner mutex_lock(&cpuhp_state_mutex);
2422cf392d10SThomas Gleixner
24233c1627e9SThomas Gleixner if (!invoke || !sp->startup.multi)
2424cf392d10SThomas Gleixner goto add_node;
2425dc434e05SSebastian Andrzej Siewior
2426cf392d10SThomas Gleixner /*
2427cf392d10SThomas Gleixner * Try to call the startup callback for each present cpu
2428cf392d10SThomas Gleixner * depending on the hotplug state of the cpu.
2429cf392d10SThomas Gleixner */
2430cf392d10SThomas Gleixner for_each_present_cpu(cpu) {
2431dc434e05SSebastian Andrzej Siewior struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2432cf392d10SThomas Gleixner int cpustate = st->state;
24339805c673SThomas Gleixner
24349805c673SThomas Gleixner if (cpustate < state)
24359805c673SThomas Gleixner continue;
24369805c673SThomas Gleixner
24379805c673SThomas Gleixner ret = cpuhp_issue_call(cpu, state, true, node);
24389805c673SThomas Gleixner if (ret) {
24399805c673SThomas Gleixner if (sp->teardown.multi)
24409805c673SThomas Gleixner cpuhp_rollback_install(cpu, state, node);
24419805c673SThomas Gleixner goto unlock;
24429805c673SThomas Gleixner }
24438f553c49SThomas Gleixner }
2444cf392d10SThomas Gleixner add_node:
2445cf392d10SThomas Gleixner ret = 0;
2446cf392d10SThomas Gleixner hlist_add_head(node, &sp->list);
2447cf392d10SThomas Gleixner unlock:
24485b7aa87eSThomas Gleixner mutex_unlock(&cpuhp_state_mutex);
244971def423SSebastian Andrzej Siewior return ret;
24505b7aa87eSThomas Gleixner }
2451ed3cd1daSBaokun Li
__cpuhp_state_add_instance(enum cpuhp_state state,struct hlist_node * node,bool invoke)24525b7aa87eSThomas Gleixner int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
24535b7aa87eSThomas Gleixner bool invoke)
24545b7aa87eSThomas Gleixner {
24555b7aa87eSThomas Gleixner int ret;
2456dc280d93SThomas Gleixner
2457dc280d93SThomas Gleixner cpus_read_lock();
24585b7aa87eSThomas Gleixner ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
245971def423SSebastian Andrzej Siewior cpus_read_unlock();
246011bc021dSRandy Dunlap return ret;
2461512f0980SBoris Ostrovsky }
2462932d8476SYuntao Wang EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
2463512f0980SBoris Ostrovsky
2464512f0980SBoris Ostrovsky /**
24655b7aa87eSThomas Gleixner * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state
246671def423SSebastian Andrzej Siewior * @state: The state to setup
24675b7aa87eSThomas Gleixner * @name: Name of the step
24685b7aa87eSThomas Gleixner * @invoke: If true, the startup function is invoked for cpus where
2469cf392d10SThomas Gleixner * cpu state >= @state
2470cf392d10SThomas Gleixner * @startup: startup callback function
24715b7aa87eSThomas Gleixner * @teardown: teardown callback function
24725b7aa87eSThomas Gleixner * @multi_instance: State is set up for multiple instances which get
2473b9d9d691SThomas Gleixner * added afterwards.
24745b7aa87eSThomas Gleixner *
247571def423SSebastian Andrzej Siewior * The caller needs to hold cpus read locked while calling this function.
247671def423SSebastian Andrzej Siewior * Return:
24775b7aa87eSThomas Gleixner * On success:
24785b7aa87eSThomas Gleixner * Positive state number if @state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN;
24795b7aa87eSThomas Gleixner * 0 for all other states
2480dc434e05SSebastian Andrzej Siewior * On failure: proper (negative) error code
24815b7aa87eSThomas Gleixner */
__cpuhp_setup_state_cpuslocked(enum cpuhp_state state,const char * name,bool invoke,int (* startup)(unsigned int cpu),int (* teardown)(unsigned int cpu),bool multi_instance)2482dc280d93SThomas Gleixner int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
2483dc280d93SThomas Gleixner const char *name, bool invoke,
24845b7aa87eSThomas Gleixner int (*startup)(unsigned int cpu),
2485932d8476SYuntao Wang int (*teardown)(unsigned int cpu),
2486b9d9d691SThomas Gleixner bool multi_instance)
2487b9d9d691SThomas Gleixner {
2488b9d9d691SThomas Gleixner int cpu, ret = 0;
2489b9d9d691SThomas Gleixner bool dynstate;
2490b9d9d691SThomas Gleixner
2491dc280d93SThomas Gleixner lockdep_assert_cpus_held();
24925b7aa87eSThomas Gleixner
24935b7aa87eSThomas Gleixner if (cpuhp_cb_check(state) || !name)
24945b7aa87eSThomas Gleixner return -EINVAL;
24955b7aa87eSThomas Gleixner
24965b7aa87eSThomas Gleixner mutex_lock(&cpuhp_state_mutex);
24975b7aa87eSThomas Gleixner
24985b7aa87eSThomas Gleixner ret = cpuhp_store_callbacks(state, name, startup, teardown,
24995b7aa87eSThomas Gleixner multi_instance);
25005b7aa87eSThomas Gleixner
25015b7aa87eSThomas Gleixner dynstate = state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN;
25025b7aa87eSThomas Gleixner if (ret > 0 && dynstate) {
25035b7aa87eSThomas Gleixner state = ret;
25045b7aa87eSThomas Gleixner ret = 0;
2505cf392d10SThomas Gleixner }
25065b7aa87eSThomas Gleixner
2507a724632cSThomas Gleixner if (ret || !invoke || !startup)
2508cf392d10SThomas Gleixner goto out;
2509cf392d10SThomas Gleixner
25105b7aa87eSThomas Gleixner /*
25115b7aa87eSThomas Gleixner * Try to call the startup callback for each present cpu
25125b7aa87eSThomas Gleixner * depending on the hotplug state of the cpu.
25135b7aa87eSThomas Gleixner */
2514dc434e05SSebastian Andrzej Siewior for_each_present_cpu(cpu) {
2515dc280d93SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2516932d8476SYuntao Wang int cpustate = st->state;
2517932d8476SYuntao Wang
2518dc280d93SThomas Gleixner if (cpustate < state)
2519b9d9d691SThomas Gleixner continue;
25205b7aa87eSThomas Gleixner
25215b7aa87eSThomas Gleixner ret = cpuhp_issue_call(cpu, state, true, NULL);
25225b7aa87eSThomas Gleixner if (ret) {
252371def423SSebastian Andrzej Siewior if (teardown)
252471def423SSebastian Andrzej Siewior cpuhp_rollback_install(cpu, state, NULL);
252571def423SSebastian Andrzej Siewior cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
252671def423SSebastian Andrzej Siewior goto out;
252771def423SSebastian Andrzej Siewior }
252871def423SSebastian Andrzej Siewior }
252971def423SSebastian Andrzej Siewior out:
253071def423SSebastian Andrzej Siewior mutex_unlock(&cpuhp_state_mutex);
253171def423SSebastian Andrzej Siewior /*
253271def423SSebastian Andrzej Siewior * If the requested state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN,
253371def423SSebastian Andrzej Siewior * return the dynamically allocated state in case of success.
253471def423SSebastian Andrzej Siewior */
253571def423SSebastian Andrzej Siewior if (!ret && dynstate)
253671def423SSebastian Andrzej Siewior return state;
253771def423SSebastian Andrzej Siewior return ret;
253871def423SSebastian Andrzej Siewior }
25395b7aa87eSThomas Gleixner EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
25405b7aa87eSThomas Gleixner
__cpuhp_setup_state(enum cpuhp_state state,const char * name,bool invoke,int (* startup)(unsigned int cpu),int (* teardown)(unsigned int cpu),bool multi_instance)2541cf392d10SThomas Gleixner int __cpuhp_setup_state(enum cpuhp_state state,
2542cf392d10SThomas Gleixner const char *name, bool invoke,
2543cf392d10SThomas Gleixner int (*startup)(unsigned int cpu),
2544cf392d10SThomas Gleixner int (*teardown)(unsigned int cpu),
2545cf392d10SThomas Gleixner bool multi_instance)
2546cf392d10SThomas Gleixner {
2547cf392d10SThomas Gleixner int ret;
2548cf392d10SThomas Gleixner
2549cf392d10SThomas Gleixner cpus_read_lock();
2550cf392d10SThomas Gleixner ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
2551cf392d10SThomas Gleixner teardown, multi_instance);
25528f553c49SThomas Gleixner cpus_read_unlock();
2553dc434e05SSebastian Andrzej Siewior return ret;
2554dc434e05SSebastian Andrzej Siewior }
2555cf392d10SThomas Gleixner EXPORT_SYMBOL(__cpuhp_setup_state);
2556cf392d10SThomas Gleixner
__cpuhp_state_remove_instance(enum cpuhp_state state,struct hlist_node * node,bool invoke)2557cf392d10SThomas Gleixner int __cpuhp_state_remove_instance(enum cpuhp_state state,
2558cf392d10SThomas Gleixner struct hlist_node *node, bool invoke)
2559cf392d10SThomas Gleixner {
2560cf392d10SThomas Gleixner struct cpuhp_step *sp = cpuhp_get_step(state);
2561cf392d10SThomas Gleixner int cpu;
2562cf392d10SThomas Gleixner
2563cf392d10SThomas Gleixner BUG_ON(cpuhp_cb_check(state));
2564cf392d10SThomas Gleixner
2565cf392d10SThomas Gleixner if (!sp->multi_instance)
2566cf392d10SThomas Gleixner return -EINVAL;
2567cf392d10SThomas Gleixner
2568cf392d10SThomas Gleixner cpus_read_lock();
2569cf392d10SThomas Gleixner mutex_lock(&cpuhp_state_mutex);
2570cf392d10SThomas Gleixner
2571cf392d10SThomas Gleixner if (!invoke || !cpuhp_get_teardown_cb(state))
2572cf392d10SThomas Gleixner goto remove;
25738f553c49SThomas Gleixner /*
2574cf392d10SThomas Gleixner * Call the teardown callback for each present cpu depending
2575cf392d10SThomas Gleixner * on the hotplug state of the cpu. This function is not
2576cf392d10SThomas Gleixner * allowed to fail currently!
2577cf392d10SThomas Gleixner */
2578dc434e05SSebastian Andrzej Siewior for_each_present_cpu(cpu) {
25795b7aa87eSThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
258071def423SSebastian Andrzej Siewior int cpustate = st->state;
25815b7aa87eSThomas Gleixner
25825b7aa87eSThomas Gleixner if (cpustate >= state)
25835b7aa87eSThomas Gleixner cpuhp_issue_call(cpu, state, false, node);
25845b7aa87eSThomas Gleixner }
258571def423SSebastian Andrzej Siewior
25865b7aa87eSThomas Gleixner remove:
25875b7aa87eSThomas Gleixner hlist_del(node);
25885b7aa87eSThomas Gleixner mutex_unlock(&cpuhp_state_mutex);
258971def423SSebastian Andrzej Siewior cpus_read_unlock();
25905b7aa87eSThomas Gleixner
2591cf392d10SThomas Gleixner return 0;
25925b7aa87eSThomas Gleixner }
25935b7aa87eSThomas Gleixner EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
25945b7aa87eSThomas Gleixner
25955b7aa87eSThomas Gleixner /**
259671def423SSebastian Andrzej Siewior * __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state
25975b7aa87eSThomas Gleixner * @state: The state to remove
2598dc434e05SSebastian Andrzej Siewior * @invoke: If true, the teardown function is invoked for cpus where
2599cf392d10SThomas Gleixner * cpu state >= @state
2600cf392d10SThomas Gleixner *
2601cf392d10SThomas Gleixner * The caller needs to hold cpus read locked while calling this function.
2602cf392d10SThomas Gleixner * The teardown callback is currently not allowed to fail. Think
2603cf392d10SThomas Gleixner * about module removal!
2604cf392d10SThomas Gleixner */
__cpuhp_remove_state_cpuslocked(enum cpuhp_state state,bool invoke)2605cf392d10SThomas Gleixner void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
2606a724632cSThomas Gleixner {
26075b7aa87eSThomas Gleixner struct cpuhp_step *sp = cpuhp_get_step(state);
26085b7aa87eSThomas Gleixner int cpu;
26095b7aa87eSThomas Gleixner
26105b7aa87eSThomas Gleixner BUG_ON(cpuhp_cb_check(state));
26115b7aa87eSThomas Gleixner
26125b7aa87eSThomas Gleixner lockdep_assert_cpus_held();
26135b7aa87eSThomas Gleixner
26145b7aa87eSThomas Gleixner mutex_lock(&cpuhp_state_mutex);
26155b7aa87eSThomas Gleixner if (sp->multi_instance) {
26165b7aa87eSThomas Gleixner WARN(!hlist_empty(&sp->list),
26175b7aa87eSThomas Gleixner "Error: Removing state %d which has instances left.\n",
26185b7aa87eSThomas Gleixner state);
2619cf392d10SThomas Gleixner goto remove;
26205b7aa87eSThomas Gleixner }
26215b7aa87eSThomas Gleixner
2622cf392d10SThomas Gleixner if (!invoke || !cpuhp_get_teardown_cb(state))
2623dc434e05SSebastian Andrzej Siewior goto remove;
262471def423SSebastian Andrzej Siewior
262571def423SSebastian Andrzej Siewior /*
262671def423SSebastian Andrzej Siewior * Call the teardown callback for each present cpu depending
262771def423SSebastian Andrzej Siewior * on the hotplug state of the cpu. This function is not
262871def423SSebastian Andrzej Siewior * allowed to fail currently!
262971def423SSebastian Andrzej Siewior */
263071def423SSebastian Andrzej Siewior for_each_present_cpu(cpu) {
26318f553c49SThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
26325b7aa87eSThomas Gleixner int cpustate = st->state;
26335b7aa87eSThomas Gleixner
26345b7aa87eSThomas Gleixner if (cpustate >= state)
2635dc8d37edSArnd Bergmann cpuhp_issue_call(cpu, state, false, NULL);
2636dc8d37edSArnd Bergmann }
2637dc8d37edSArnd Bergmann remove:
2638dc8d37edSArnd Bergmann cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2639dc8d37edSArnd Bergmann mutex_unlock(&cpuhp_state_mutex);
2640dc8d37edSArnd Bergmann }
2641dc8d37edSArnd Bergmann EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
2642dc8d37edSArnd Bergmann
__cpuhp_remove_state(enum cpuhp_state state,bool invoke)2643dc8d37edSArnd Bergmann void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2644dc8d37edSArnd Bergmann {
2645dc8d37edSArnd Bergmann cpus_read_lock();
2646dc8d37edSArnd Bergmann __cpuhp_remove_state_cpuslocked(state, invoke);
2647dc8d37edSArnd Bergmann cpus_read_unlock();
2648dc8d37edSArnd Bergmann }
2649dc8d37edSArnd Bergmann EXPORT_SYMBOL(__cpuhp_remove_state);
2650dc8d37edSArnd Bergmann
2651dc8d37edSArnd Bergmann #ifdef CONFIG_HOTPLUG_SMT
cpuhp_offline_cpu_device(unsigned int cpu)2652dc8d37edSArnd Bergmann static void cpuhp_offline_cpu_device(unsigned int cpu)
2653dc8d37edSArnd Bergmann {
2654dc8d37edSArnd Bergmann struct device *dev = get_cpu_device(cpu);
2655dc8d37edSArnd Bergmann
2656dc8d37edSArnd Bergmann dev->offline = true;
2657dc8d37edSArnd Bergmann /* Tell user space about the state change */
2658dc8d37edSArnd Bergmann kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2659dc8d37edSArnd Bergmann }
2660dc8d37edSArnd Bergmann
cpuhp_online_cpu_device(unsigned int cpu)2661dc8d37edSArnd Bergmann static void cpuhp_online_cpu_device(unsigned int cpu)
266238253464SMichael Ellerman {
266338253464SMichael Ellerman struct device *dev = get_cpu_device(cpu);
266438253464SMichael Ellerman
266538253464SMichael Ellerman dev->offline = false;
266638253464SMichael Ellerman /* Tell user space about the state change */
266738253464SMichael Ellerman kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2668dc8d37edSArnd Bergmann }
2669dc8d37edSArnd Bergmann
cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)2670dc8d37edSArnd Bergmann int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2671dc8d37edSArnd Bergmann {
2672dc8d37edSArnd Bergmann int cpu, ret = 0;
2673dc8d37edSArnd Bergmann
2674dc8d37edSArnd Bergmann cpu_maps_update_begin();
2675dc8d37edSArnd Bergmann for_each_online_cpu(cpu) {
2676dc8d37edSArnd Bergmann if (topology_is_primary_thread(cpu))
2677dc8d37edSArnd Bergmann continue;
2678dc8d37edSArnd Bergmann /*
2679dc8d37edSArnd Bergmann * Disable can be called with CPU_SMT_ENABLED when changing
2680dc8d37edSArnd Bergmann * from a higher to lower number of SMT threads per core.
2681dc8d37edSArnd Bergmann */
2682dc8d37edSArnd Bergmann if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
2683dc8d37edSArnd Bergmann continue;
2684dc8d37edSArnd Bergmann ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2685dc8d37edSArnd Bergmann if (ret)
2686dc8d37edSArnd Bergmann break;
2687dc8d37edSArnd Bergmann /*
2688dc8d37edSArnd Bergmann * As this needs to hold the cpu maps lock it's impossible
2689dc8d37edSArnd Bergmann * to call device_offline() because that ends up calling
2690dc8d37edSArnd Bergmann * cpu_down() which takes cpu maps lock. cpu maps lock
2691dc8d37edSArnd Bergmann * needs to be held as this might race against in kernel
2692*6c17ea1fSNysal Jan K.A * abusers of the hotplug machinery (thermal management).
2693*6c17ea1fSNysal Jan K.A *
2694*6c17ea1fSNysal Jan K.A * So nothing would update device:offline state. That would
2695*6c17ea1fSNysal Jan K.A * leave the sysfs entry stale and prevent onlining after
2696*6c17ea1fSNysal Jan K.A * smt control has been changed to 'off' again. This is
2697*6c17ea1fSNysal Jan K.A * called under the sysfs hotplug lock, so it is properly
2698*6c17ea1fSNysal Jan K.A * serialized against the regular offline usage.
2699*6c17ea1fSNysal Jan K.A */
2700*6c17ea1fSNysal Jan K.A cpuhp_offline_cpu_device(cpu);
2701*6c17ea1fSNysal Jan K.A }
2702dc8d37edSArnd Bergmann if (!ret)
2703dc8d37edSArnd Bergmann cpu_smt_control = ctrlval;
2704dc8d37edSArnd Bergmann cpu_maps_update_done();
2705dc8d37edSArnd Bergmann return ret;
2706dc8d37edSArnd Bergmann }
2707dc8d37edSArnd Bergmann
2708dc8d37edSArnd Bergmann /* Check if the core a CPU belongs to is online */
2709dc8d37edSArnd Bergmann #if !defined(topology_is_core_online)
topology_is_core_online(unsigned int cpu)2710dc8d37edSArnd Bergmann static inline bool topology_is_core_online(unsigned int cpu)
2711dc8d37edSArnd Bergmann {
2712*6c17ea1fSNysal Jan K.A return true;
271338253464SMichael Ellerman }
2714dc8d37edSArnd Bergmann #endif
2715dc8d37edSArnd Bergmann
cpuhp_smt_enable(void)2716dc8d37edSArnd Bergmann int cpuhp_smt_enable(void)
2717dc8d37edSArnd Bergmann {
2718dc8d37edSArnd Bergmann int cpu, ret = 0;
2719dc8d37edSArnd Bergmann
2720dc8d37edSArnd Bergmann cpu_maps_update_begin();
2721dc8d37edSArnd Bergmann cpu_smt_control = CPU_SMT_ENABLED;
2722dc8d37edSArnd Bergmann for_each_present_cpu(cpu) {
2723dc8d37edSArnd Bergmann /* Skip online CPUs and CPUs on offline nodes */
2724dc8d37edSArnd Bergmann if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
272598f8cdceSThomas Gleixner continue;
27261782dc87SYueHaibing if (!cpu_smt_thread_allowed(cpu) || !topology_is_core_online(cpu))
272798f8cdceSThomas Gleixner continue;
272898f8cdceSThomas Gleixner ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
272998f8cdceSThomas Gleixner if (ret)
273098f8cdceSThomas Gleixner break;
273198f8cdceSThomas Gleixner /* See comment in cpuhp_smt_disable() */
273298f8cdceSThomas Gleixner cpuhp_online_cpu_device(cpu);
27331782dc87SYueHaibing }
273498f8cdceSThomas Gleixner cpu_maps_update_done();
27351782dc87SYueHaibing return ret;
2736757c989bSThomas Gleixner }
2737757c989bSThomas Gleixner #endif
2738757c989bSThomas Gleixner
2739757c989bSThomas Gleixner #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
state_show(struct device * dev,struct device_attribute * attr,char * buf)2740757c989bSThomas Gleixner static ssize_t state_show(struct device *dev,
2741757c989bSThomas Gleixner struct device_attribute *attr, char *buf)
2742757c989bSThomas Gleixner {
2743757c989bSThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2744757c989bSThomas Gleixner
2745757c989bSThomas Gleixner return sprintf(buf, "%d\n", st->state);
2746757c989bSThomas Gleixner }
2747757c989bSThomas Gleixner static DEVICE_ATTR_RO(state);
2748757c989bSThomas Gleixner
target_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2749757c989bSThomas Gleixner static ssize_t target_store(struct device *dev, struct device_attribute *attr,
2750757c989bSThomas Gleixner const char *buf, size_t count)
2751757c989bSThomas Gleixner {
2752757c989bSThomas Gleixner struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2753757c989bSThomas Gleixner struct cpuhp_step *sp;
2754757c989bSThomas Gleixner int target, ret;
2755757c989bSThomas Gleixner
2756757c989bSThomas Gleixner ret = kstrtoint(buf, 10, &target);
2757757c989bSThomas Gleixner if (ret)
2758757c989bSThomas Gleixner return ret;
2759757c989bSThomas Gleixner
2760757c989bSThomas Gleixner #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2761757c989bSThomas Gleixner if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2762757c989bSThomas Gleixner return -EINVAL;
276340da1b11SSebastian Andrzej Siewior #else
2764757c989bSThomas Gleixner if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2765757c989bSThomas Gleixner return -EINVAL;
276633c3736eSQais Yousef #endif
276764ea6e44SPhil Auld
276833c3736eSQais Yousef ret = lock_device_hotplug_sysfs();
276964ea6e44SPhil Auld if (ret)
277064ea6e44SPhil Auld return ret;
277140da1b11SSebastian Andrzej Siewior
2772757c989bSThomas Gleixner mutex_lock(&cpuhp_state_mutex);
2773757c989bSThomas Gleixner sp = cpuhp_get_step(target);
2774757c989bSThomas Gleixner ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2775757c989bSThomas Gleixner mutex_unlock(&cpuhp_state_mutex);
27761782dc87SYueHaibing if (ret)
277798f8cdceSThomas Gleixner goto out;
277898f8cdceSThomas Gleixner
277998f8cdceSThomas Gleixner if (st->state < target)
278098f8cdceSThomas Gleixner ret = cpu_up(dev->id, target);
278198f8cdceSThomas Gleixner else if (st->state > target)
278298f8cdceSThomas Gleixner ret = cpu_down(dev->id, target);
27831782dc87SYueHaibing else if (WARN_ON(st->target != target))
278498f8cdceSThomas Gleixner st->target = target;
27851782dc87SYueHaibing out:
27861db49484SPeter Zijlstra unlock_device_hotplug();
27871db49484SPeter Zijlstra return ret ? ret : count;
27881db49484SPeter Zijlstra }
27891db49484SPeter Zijlstra
target_show(struct device * dev,struct device_attribute * attr,char * buf)27901db49484SPeter Zijlstra static ssize_t target_show(struct device *dev,
27911db49484SPeter Zijlstra struct device_attribute *attr, char *buf)
27921db49484SPeter Zijlstra {
27931db49484SPeter Zijlstra struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
27941db49484SPeter Zijlstra
27951db49484SPeter Zijlstra return sprintf(buf, "%d\n", st->target);
27963ae70c25SVincent Donnefort }
27973ae70c25SVincent Donnefort static DEVICE_ATTR_RW(target);
27983ae70c25SVincent Donnefort
fail_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)27993ae70c25SVincent Donnefort static ssize_t fail_store(struct device *dev, struct device_attribute *attr,
28003ae70c25SVincent Donnefort const char *buf, size_t count)
280133d4a5a7SEiichi Tsukata {
280233d4a5a7SEiichi Tsukata struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
280333d4a5a7SEiichi Tsukata struct cpuhp_step *sp;
28041db49484SPeter Zijlstra int fail, ret;
28051db49484SPeter Zijlstra
28061db49484SPeter Zijlstra ret = kstrtoint(buf, 10, &fail);
28071db49484SPeter Zijlstra if (ret)
28081db49484SPeter Zijlstra return ret;
28091db49484SPeter Zijlstra
28101db49484SPeter Zijlstra if (fail == CPUHP_INVALID) {
281162f25069SVincent Donnefort st->fail = fail;
281262f25069SVincent Donnefort return count;
281362f25069SVincent Donnefort }
281462f25069SVincent Donnefort
281562f25069SVincent Donnefort if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
281662f25069SVincent Donnefort return -EINVAL;
281762f25069SVincent Donnefort
281862f25069SVincent Donnefort /*
281962f25069SVincent Donnefort * Cannot fail STARTING/DYING callbacks.
28201db49484SPeter Zijlstra */
28211db49484SPeter Zijlstra if (cpuhp_is_atomic_state(fail))
28221db49484SPeter Zijlstra return -EINVAL;
28231db49484SPeter Zijlstra
28241db49484SPeter Zijlstra /*
28251db49484SPeter Zijlstra * DEAD callbacks cannot fail...
28261db49484SPeter Zijlstra * ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter
28271db49484SPeter Zijlstra * triggering STARTING callbacks, a failure in this state would
28281db49484SPeter Zijlstra * hinder rollback.
28291db49484SPeter Zijlstra */
28301db49484SPeter Zijlstra if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)
28311db49484SPeter Zijlstra return -EINVAL;
28321db49484SPeter Zijlstra
28331db49484SPeter Zijlstra /*
28341db49484SPeter Zijlstra * Cannot fail anything that doesn't have callbacks.
28351782dc87SYueHaibing */
28361db49484SPeter Zijlstra mutex_lock(&cpuhp_state_mutex);
28371db49484SPeter Zijlstra sp = cpuhp_get_step(fail);
28381db49484SPeter Zijlstra if (!sp->startup.single && !sp->teardown.single)
28391db49484SPeter Zijlstra ret = -EINVAL;
28401db49484SPeter Zijlstra mutex_unlock(&cpuhp_state_mutex);
28411db49484SPeter Zijlstra if (ret)
28421db49484SPeter Zijlstra return ret;
28431782dc87SYueHaibing
28441db49484SPeter Zijlstra st->fail = fail;
284598f8cdceSThomas Gleixner
284698f8cdceSThomas Gleixner return count;
284798f8cdceSThomas Gleixner }
28481db49484SPeter Zijlstra
fail_show(struct device * dev,struct device_attribute * attr,char * buf)284998f8cdceSThomas Gleixner static ssize_t fail_show(struct device *dev,
285098f8cdceSThomas Gleixner struct device_attribute *attr, char *buf)
285198f8cdceSThomas Gleixner {
2852993647a2SArvind Yadav struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
285398f8cdceSThomas Gleixner
285498f8cdceSThomas Gleixner return sprintf(buf, "%d\n", st->fail);
285598f8cdceSThomas Gleixner }
285698f8cdceSThomas Gleixner
285798f8cdceSThomas Gleixner static DEVICE_ATTR_RW(fail);
28581782dc87SYueHaibing
285998f8cdceSThomas Gleixner static struct attribute *cpuhp_cpu_attrs[] = {
286098f8cdceSThomas Gleixner &dev_attr_state.attr,
286198f8cdceSThomas Gleixner &dev_attr_target.attr,
286298f8cdceSThomas Gleixner &dev_attr_fail.attr,
286398f8cdceSThomas Gleixner NULL
286498f8cdceSThomas Gleixner };
2865757c989bSThomas Gleixner
286698f8cdceSThomas Gleixner static const struct attribute_group cpuhp_cpu_attr_group = {
286798f8cdceSThomas Gleixner .attrs = cpuhp_cpu_attrs,
286898f8cdceSThomas Gleixner .name = "hotplug",
286998f8cdceSThomas Gleixner NULL
287098f8cdceSThomas Gleixner };
287198f8cdceSThomas Gleixner
states_show(struct device * dev,struct device_attribute * attr,char * buf)287298f8cdceSThomas Gleixner static ssize_t states_show(struct device *dev,
287398f8cdceSThomas Gleixner struct device_attribute *attr, char *buf)
287498f8cdceSThomas Gleixner {
287598f8cdceSThomas Gleixner ssize_t cur, res = 0;
287698f8cdceSThomas Gleixner int i;
28771782dc87SYueHaibing
287898f8cdceSThomas Gleixner mutex_lock(&cpuhp_state_mutex);
287998f8cdceSThomas Gleixner for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
288098f8cdceSThomas Gleixner struct cpuhp_step *sp = cpuhp_get_step(i);
288198f8cdceSThomas Gleixner
288298f8cdceSThomas Gleixner if (sp->name) {
288398f8cdceSThomas Gleixner cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2884993647a2SArvind Yadav buf += cur;
288598f8cdceSThomas Gleixner res += cur;
288698f8cdceSThomas Gleixner }
288798f8cdceSThomas Gleixner }
288898f8cdceSThomas Gleixner mutex_unlock(&cpuhp_state_mutex);
288998f8cdceSThomas Gleixner return res;
289005736e4aSThomas Gleixner }
289105736e4aSThomas Gleixner static DEVICE_ATTR_RO(states);
28927f48405cSMichael Ellerman
28937f48405cSMichael Ellerman static struct attribute *cpuhp_cpu_root_attrs[] = {
28947f48405cSMichael Ellerman &dev_attr_states.attr,
28957f48405cSMichael Ellerman NULL
28967f48405cSMichael Ellerman };
28977f48405cSMichael Ellerman
28987f48405cSMichael Ellerman static const struct attribute_group cpuhp_cpu_root_attr_group = {
289905736e4aSThomas Gleixner .attrs = cpuhp_cpu_root_attrs,
2900de7b77e5SJosh Poimboeuf .name = "hotplug",
290105736e4aSThomas Gleixner NULL
290205736e4aSThomas Gleixner };
29037f48405cSMichael Ellerman
29047f48405cSMichael Ellerman #ifdef CONFIG_HOTPLUG_SMT
290505736e4aSThomas Gleixner
cpu_smt_num_threads_valid(unsigned int threads)2906c53361ceSMichael Ellerman static bool cpu_smt_num_threads_valid(unsigned int threads)
2907c53361ceSMichael Ellerman {
2908c53361ceSMichael Ellerman if (IS_ENABLED(CONFIG_SMT_NUM_THREADS_DYNAMIC))
2909c53361ceSMichael Ellerman return threads >= 1 && threads <= cpu_smt_max_threads;
2910c53361ceSMichael Ellerman return threads == 1 || threads == cpu_smt_max_threads;
2911c53361ceSMichael Ellerman }
29127f48405cSMichael Ellerman
291305736e4aSThomas Gleixner static ssize_t
__store_smt_control(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)29147f48405cSMichael Ellerman __store_smt_control(struct device *dev, struct device_attribute *attr,
29157f48405cSMichael Ellerman const char *buf, size_t count)
291605736e4aSThomas Gleixner {
29177f48405cSMichael Ellerman int ctrlval, ret, num_threads, orig_threads;
29187f48405cSMichael Ellerman bool force_off;
291905736e4aSThomas Gleixner
29207f48405cSMichael Ellerman if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
29217f48405cSMichael Ellerman return -EPERM;
29227f48405cSMichael Ellerman
29237f48405cSMichael Ellerman if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
29247f48405cSMichael Ellerman return -ENODEV;
29257f48405cSMichael Ellerman
292605736e4aSThomas Gleixner if (sysfs_streq(buf, "on")) {
292705736e4aSThomas Gleixner ctrlval = CPU_SMT_ENABLED;
29287f48405cSMichael Ellerman num_threads = cpu_smt_max_threads;
29297f48405cSMichael Ellerman } else if (sysfs_streq(buf, "off")) {
29307f48405cSMichael Ellerman ctrlval = CPU_SMT_DISABLED;
293105736e4aSThomas Gleixner num_threads = 1;
293205736e4aSThomas Gleixner } else if (sysfs_streq(buf, "forceoff")) {
293305736e4aSThomas Gleixner ctrlval = CPU_SMT_FORCE_DISABLED;
293405736e4aSThomas Gleixner num_threads = 1;
293505736e4aSThomas Gleixner } else if (kstrtoint(buf, 10, &num_threads) == 0) {
29367f48405cSMichael Ellerman if (num_threads == 1)
29377f48405cSMichael Ellerman ctrlval = CPU_SMT_DISABLED;
29387f48405cSMichael Ellerman else if (cpu_smt_num_threads_valid(num_threads))
29397f48405cSMichael Ellerman ctrlval = CPU_SMT_ENABLED;
29407f48405cSMichael Ellerman else
29417f48405cSMichael Ellerman return -EINVAL;
2942215af549SThomas Gleixner } else {
29437f48405cSMichael Ellerman return -EINVAL;
294405736e4aSThomas Gleixner }
294505736e4aSThomas Gleixner
294605736e4aSThomas Gleixner ret = lock_device_hotplug_sysfs();
294705736e4aSThomas Gleixner if (ret)
294805736e4aSThomas Gleixner return ret;
2949de7b77e5SJosh Poimboeuf
2950de7b77e5SJosh Poimboeuf orig_threads = cpu_smt_num_threads;
2951de7b77e5SJosh Poimboeuf cpu_smt_num_threads = num_threads;
2952de7b77e5SJosh Poimboeuf
2953de7b77e5SJosh Poimboeuf force_off = ctrlval != cpu_smt_control && ctrlval == CPU_SMT_FORCE_DISABLED;
2954de7b77e5SJosh Poimboeuf
2955de7b77e5SJosh Poimboeuf if (num_threads > orig_threads)
2956de7b77e5SJosh Poimboeuf ret = cpuhp_smt_enable();
2957de7b77e5SJosh Poimboeuf else if (num_threads < orig_threads || force_off)
2958de7b77e5SJosh Poimboeuf ret = cpuhp_smt_disable(ctrlval);
2959de7b77e5SJosh Poimboeuf
2960de7b77e5SJosh Poimboeuf unlock_device_hotplug();
2961de7b77e5SJosh Poimboeuf return ret ? ret : count;
2962de7b77e5SJosh Poimboeuf }
2963de7b77e5SJosh Poimboeuf
2964de7b77e5SJosh Poimboeuf #else /* !CONFIG_HOTPLUG_SMT */
2965de7b77e5SJosh Poimboeuf static ssize_t
__store_smt_control(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2966de7b77e5SJosh Poimboeuf __store_smt_control(struct device *dev, struct device_attribute *attr,
29671782dc87SYueHaibing const char *buf, size_t count)
29681782dc87SYueHaibing {
2969de7b77e5SJosh Poimboeuf return -ENODEV;
2970de7b77e5SJosh Poimboeuf }
2971de7b77e5SJosh Poimboeuf #endif /* CONFIG_HOTPLUG_SMT */
29727f48405cSMichael Ellerman
29737f48405cSMichael Ellerman static const char *smt_states[] = {
29747f48405cSMichael Ellerman [CPU_SMT_ENABLED] = "on",
29757f48405cSMichael Ellerman [CPU_SMT_DISABLED] = "off",
29767f48405cSMichael Ellerman [CPU_SMT_FORCE_DISABLED] = "forceoff",
29777f48405cSMichael Ellerman [CPU_SMT_NOT_SUPPORTED] = "notsupported",
29787f48405cSMichael Ellerman [CPU_SMT_NOT_IMPLEMENTED] = "notimplemented",
29797f48405cSMichael Ellerman };
29807f48405cSMichael Ellerman
control_show(struct device * dev,struct device_attribute * attr,char * buf)29817f48405cSMichael Ellerman static ssize_t control_show(struct device *dev,
29827f48405cSMichael Ellerman struct device_attribute *attr, char *buf)
2983effe6d27SLi Zhijian {
2984de7b77e5SJosh Poimboeuf const char *state = smt_states[cpu_smt_control];
2985de7b77e5SJosh Poimboeuf
29861782dc87SYueHaibing #ifdef CONFIG_HOTPLUG_SMT
2987de7b77e5SJosh Poimboeuf /*
2988de7b77e5SJosh Poimboeuf * If SMT is enabled but not all threads are enabled then show the
2989de7b77e5SJosh Poimboeuf * number of threads. If all threads are enabled show "on". Otherwise
2990de7b77e5SJosh Poimboeuf * show the state name.
29911782dc87SYueHaibing */
299205736e4aSThomas Gleixner if (cpu_smt_control == CPU_SMT_ENABLED &&
29931782dc87SYueHaibing cpu_smt_num_threads != cpu_smt_max_threads)
29941782dc87SYueHaibing return sysfs_emit(buf, "%d\n", cpu_smt_num_threads);
299505736e4aSThomas Gleixner #endif
2996effe6d27SLi Zhijian
299705736e4aSThomas Gleixner return sysfs_emit(buf, "%s\n", state);
29981782dc87SYueHaibing }
299905736e4aSThomas Gleixner
control_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)300005736e4aSThomas Gleixner static ssize_t control_store(struct device *dev, struct device_attribute *attr,
300105736e4aSThomas Gleixner const char *buf, size_t count)
300205736e4aSThomas Gleixner {
300305736e4aSThomas Gleixner return __store_smt_control(dev, attr, buf, count);
300405736e4aSThomas Gleixner }
300505736e4aSThomas Gleixner static DEVICE_ATTR_RW(control);
300605736e4aSThomas Gleixner
active_show(struct device * dev,struct device_attribute * attr,char * buf)300705736e4aSThomas Gleixner static ssize_t active_show(struct device *dev,
300805736e4aSThomas Gleixner struct device_attribute *attr, char *buf)
300905736e4aSThomas Gleixner {
301005736e4aSThomas Gleixner return sysfs_emit(buf, "%d\n", sched_smt_active());
301105736e4aSThomas Gleixner }
3012de7b77e5SJosh Poimboeuf static DEVICE_ATTR_RO(active);
301305736e4aSThomas Gleixner
3014db281d59SGreg Kroah-Hartman static struct attribute *cpuhp_smt_attrs[] = {
3015db281d59SGreg Kroah-Hartman &dev_attr_control.attr,
3016db281d59SGreg Kroah-Hartman &dev_attr_active.attr,
3017db281d59SGreg Kroah-Hartman NULL
3018db281d59SGreg Kroah-Hartman };
3019db281d59SGreg Kroah-Hartman
3020db281d59SGreg Kroah-Hartman static const struct attribute_group cpuhp_smt_attr_group = {
3021db281d59SGreg Kroah-Hartman .attrs = cpuhp_smt_attrs,
3022db281d59SGreg Kroah-Hartman .name = "smt",
302305736e4aSThomas Gleixner NULL
302405736e4aSThomas Gleixner };
302598f8cdceSThomas Gleixner
cpu_smt_sysfs_init(void)302698f8cdceSThomas Gleixner static int __init cpu_smt_sysfs_init(void)
3027db281d59SGreg Kroah-Hartman {
302898f8cdceSThomas Gleixner struct device *dev_root;
302998f8cdceSThomas Gleixner int ret = -ENODEV;
3030de7b77e5SJosh Poimboeuf
303105736e4aSThomas Gleixner dev_root = bus_get_dev_root(&cpu_subsys);
303205736e4aSThomas Gleixner if (dev_root) {
303305736e4aSThomas Gleixner ret = sysfs_create_group(&dev_root->kobj, &cpuhp_smt_attr_group);
3034db281d59SGreg Kroah-Hartman put_device(dev_root);
3035db281d59SGreg Kroah-Hartman }
3036db281d59SGreg Kroah-Hartman return ret;
3037db281d59SGreg Kroah-Hartman }
303898f8cdceSThomas Gleixner
cpuhp_sysfs_init(void)303998f8cdceSThomas Gleixner static int __init cpuhp_sysfs_init(void)
3040db281d59SGreg Kroah-Hartman {
304198f8cdceSThomas Gleixner struct device *dev_root;
304298f8cdceSThomas Gleixner int cpu, ret;
304398f8cdceSThomas Gleixner
304498f8cdceSThomas Gleixner ret = cpu_smt_sysfs_init();
304598f8cdceSThomas Gleixner if (ret)
304698f8cdceSThomas Gleixner return ret;
304798f8cdceSThomas Gleixner
304898f8cdceSThomas Gleixner dev_root = bus_get_dev_root(&cpu_subsys);
304998f8cdceSThomas Gleixner if (dev_root) {
305098f8cdceSThomas Gleixner ret = sysfs_create_group(&dev_root->kobj, &cpuhp_cpu_root_attr_group);
305198f8cdceSThomas Gleixner put_device(dev_root);
305298f8cdceSThomas Gleixner if (ret)
305398f8cdceSThomas Gleixner return ret;
3054de7b77e5SJosh Poimboeuf }
305598f8cdceSThomas Gleixner
3056e56b3bc7SLinus Torvalds for_each_possible_cpu(cpu) {
3057e56b3bc7SLinus Torvalds struct device *dev = get_cpu_device(cpu);
3058e56b3bc7SLinus Torvalds
3059e56b3bc7SLinus Torvalds if (!dev)
3060e0b582ecSRusty Russell continue;
3061e56b3bc7SLinus Torvalds ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
3062e56b3bc7SLinus Torvalds if (ret)
3063b8d317d1SMike Travis return ret;
3064e56b3bc7SLinus Torvalds }
30654d51985eSMichael Rodriguez return 0;
3066e56b3bc7SLinus Torvalds }
3067e56b3bc7SLinus Torvalds device_initcall(cpuhp_sysfs_init);
3068e56b3bc7SLinus Torvalds #endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
3069b8d317d1SMike Travis
3070e56b3bc7SLinus Torvalds /*
3071b8d317d1SMike Travis * cpu_bit_bitmap[] is a special, "compressed" data structure that
3072e56b3bc7SLinus Torvalds * represents all NR_CPUS bits binary values of 1<<nr.
3073e56b3bc7SLinus Torvalds *
3074e56b3bc7SLinus Torvalds * It is used by cpumask_of() to get a constant address to a CPU
3075e56b3bc7SLinus Torvalds * mask value that has a single bit set only.
3076e56b3bc7SLinus Torvalds */
3077b8d317d1SMike Travis
3078b8d317d1SMike Travis /* cpu_bit_bitmap[0] is empty - so we can back into it */
3079e56b3bc7SLinus Torvalds #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
30802d3854a3SRusty Russell #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
30812d3854a3SRusty Russell #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
30822d3854a3SRusty Russell #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
3083b3199c02SRusty Russell
3084b3199c02SRusty Russell const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
3085266e9578SMax Kellermann
3086c4c54dd1SRasmus Villemoes MASK_DECLARE_8(0), MASK_DECLARE_8(8),
3087b3199c02SRusty Russell MASK_DECLARE_8(16), MASK_DECLARE_8(24),
3088da92df49SAlexey Dobriyan #if BITS_PER_LONG > 32
3089b3199c02SRusty Russell MASK_DECLARE_8(32), MASK_DECLARE_8(40),
30904b804c85SRasmus Villemoes MASK_DECLARE_8(48), MASK_DECLARE_8(56),
3091b3199c02SRusty Russell #endif
30924b804c85SRasmus Villemoes };
30934b804c85SRasmus Villemoes EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
3094b3199c02SRusty Russell
30954e1a7df4SJames Morse const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
30964e1a7df4SJames Morse EXPORT_SYMBOL(cpu_all_bits);
30974e1a7df4SJames Morse
30984b804c85SRasmus Villemoes #ifdef CONFIG_INIT_ALL_POSSIBLE
30994b804c85SRasmus Villemoes struct cpumask __cpu_possible_mask __ro_after_init
3100b3199c02SRusty Russell = {CPU_BITS_ALL};
31014b804c85SRasmus Villemoes #else
31024b804c85SRasmus Villemoes struct cpumask __cpu_possible_mask __ro_after_init;
31033fa41520SRusty Russell #endif
3104e40f74c5SPeter Zijlstra EXPORT_SYMBOL(__cpu_possible_mask);
3105e40f74c5SPeter Zijlstra
3106e40f74c5SPeter Zijlstra struct cpumask __cpu_online_mask __read_mostly;
31070c09ab96SThomas Gleixner EXPORT_SYMBOL(__cpu_online_mask);
31080c09ab96SThomas Gleixner
31090c09ab96SThomas Gleixner struct cpumask __cpu_enabled_mask __read_mostly;
31103fa41520SRusty Russell EXPORT_SYMBOL(__cpu_enabled_mask);
31113fa41520SRusty Russell
3112c4c54dd1SRasmus Villemoes struct cpumask __cpu_present_mask __read_mostly;
31133fa41520SRusty Russell EXPORT_SYMBOL(__cpu_present_mask);
31143fa41520SRusty Russell
31153fa41520SRusty Russell struct cpumask __cpu_active_mask __read_mostly;
31163fa41520SRusty Russell EXPORT_SYMBOL(__cpu_active_mask);
3117c4c54dd1SRasmus Villemoes
31183fa41520SRusty Russell struct cpumask __cpu_dying_mask __read_mostly;
31193fa41520SRusty Russell EXPORT_SYMBOL(__cpu_dying_mask);
31203fa41520SRusty Russell
31213fa41520SRusty Russell atomic_t __num_online_cpus __read_mostly;
3122c4c54dd1SRasmus Villemoes EXPORT_SYMBOL(__num_online_cpus);
31233fa41520SRusty Russell
init_cpu_present(const struct cpumask * src)3124cff7d378SThomas Gleixner void init_cpu_present(const struct cpumask *src)
31250c09ab96SThomas Gleixner {
31260c09ab96SThomas Gleixner cpumask_copy(&__cpu_present_mask, src);
31270c09ab96SThomas Gleixner }
31280c09ab96SThomas Gleixner
init_cpu_possible(const struct cpumask * src)31290c09ab96SThomas Gleixner void init_cpu_possible(const struct cpumask *src)
31300c09ab96SThomas Gleixner {
31310c09ab96SThomas Gleixner cpumask_copy(&__cpu_possible_mask, src);
31320c09ab96SThomas Gleixner }
31330c09ab96SThomas Gleixner
init_cpu_online(const struct cpumask * src)31340c09ab96SThomas Gleixner void init_cpu_online(const struct cpumask *src)
31350c09ab96SThomas Gleixner {
31360c09ab96SThomas Gleixner cpumask_copy(&__cpu_online_mask, src);
31370c09ab96SThomas Gleixner }
31380c09ab96SThomas Gleixner
set_cpu_online(unsigned int cpu,bool online)31390c09ab96SThomas Gleixner void set_cpu_online(unsigned int cpu, bool online)
31400c09ab96SThomas Gleixner {
31410c09ab96SThomas Gleixner /*
31420c09ab96SThomas Gleixner * atomic_inc/dec() is required to handle the horrid abuse of this
31430c09ab96SThomas Gleixner * function by the reboot and kexec code which invoke it from
31440c09ab96SThomas Gleixner * IPI/NMI broadcasts when shutting down CPUs. Invocation from
31450c09ab96SThomas Gleixner * regular CPU hotplug is properly serialized.
3146cff7d378SThomas Gleixner *
3147cff7d378SThomas Gleixner * Note, that the fact that __num_online_cpus is of type atomic_t
3148cff7d378SThomas Gleixner * does not protect readers which are not serialized against
3149cff7d378SThomas Gleixner * concurrent hotplug operations.
3150cff7d378SThomas Gleixner */
3151cff7d378SThomas Gleixner if (online) {
3152cff7d378SThomas Gleixner if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
3153cff7d378SThomas Gleixner atomic_inc(&__num_online_cpus);
3154cff7d378SThomas Gleixner } else {
3155cff7d378SThomas Gleixner if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
3156cff7d378SThomas Gleixner atomic_dec(&__num_online_cpus);
3157cff7d378SThomas Gleixner }
31588ce371f9SPeter Zijlstra }
31598ce371f9SPeter Zijlstra
31608ce371f9SPeter Zijlstra /*
31618ce371f9SPeter Zijlstra * Activate the first processor.
3162cff7d378SThomas Gleixner */
boot_cpu_init(void)3163cff7d378SThomas Gleixner void __init boot_cpu_init(void)
3164cff7d378SThomas Gleixner {
3165cff7d378SThomas Gleixner int cpu = smp_processor_id();
3166cff7d378SThomas Gleixner
3167b5b1404dSLinus Torvalds /* Mark the boot cpu "present", "online" etc for SMP and UP case */
3168cff7d378SThomas Gleixner set_cpu_online(cpu, true);
3169269777aaSAbel Vesa set_cpu_active(cpu, true);
3170e797bda3SThomas Gleixner set_cpu_present(cpu, true);
31716f062123SThomas Gleixner set_cpu_possible(cpu, true);
3172269777aaSAbel Vesa
31730cc3cd21SThomas Gleixner #ifdef CONFIG_SMP
3174d385febcSPhil Auld __boot_cpu_id = cpu;
3175cff7d378SThomas Gleixner #endif
317698af8452SJosh Poimboeuf }
3177ce0abef6SSean Christopherson
3178731dc9dfSTyler Hicks /*
3179731dc9dfSTyler Hicks * Must be called _AFTER_ setting up the per_cpu areas
3180731dc9dfSTyler Hicks */
boot_cpu_hotplug_init(void)3181731dc9dfSTyler Hicks void __init boot_cpu_hotplug_init(void)
3182731dc9dfSTyler Hicks {
3183731dc9dfSTyler Hicks #ifdef CONFIG_SMP
3184731dc9dfSTyler Hicks cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
3185731dc9dfSTyler Hicks atomic_set(this_cpu_ptr(&cpuhp_state.ap_sync_state), SYNC_STATE_ONLINE);
3186731dc9dfSTyler Hicks #endif
3187731dc9dfSTyler Hicks this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
3188ce0abef6SSean Christopherson this_cpu_write(cpuhp_state.target, CPUHP_ONLINE);
318998af8452SJosh Poimboeuf }
319098af8452SJosh Poimboeuf
319198af8452SJosh Poimboeuf #ifdef CONFIG_CPU_MITIGATIONS
319298af8452SJosh Poimboeuf /*
319398af8452SJosh Poimboeuf * These are used for a global "mitigations=" cmdline option for toggling
319498af8452SJosh Poimboeuf * optional CPU mitigations.
319598af8452SJosh Poimboeuf */
319698af8452SJosh Poimboeuf enum cpu_mitigations {
319798af8452SJosh Poimboeuf CPU_MITIGATIONS_OFF,
31981bf72720SGeert Uytterhoeven CPU_MITIGATIONS_AUTO,
31991bf72720SGeert Uytterhoeven CPU_MITIGATIONS_AUTO_NOSMT,
32001bf72720SGeert Uytterhoeven };
320198af8452SJosh Poimboeuf
320298af8452SJosh Poimboeuf static enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
320398af8452SJosh Poimboeuf
mitigations_parse_cmdline(char * arg)3204731dc9dfSTyler Hicks static int __init mitigations_parse_cmdline(char *arg)
3205731dc9dfSTyler Hicks {
3206731dc9dfSTyler Hicks if (!strcmp(arg, "off"))
3207731dc9dfSTyler Hicks cpu_mitigations = CPU_MITIGATIONS_OFF;
3208731dc9dfSTyler Hicks else if (!strcmp(arg, "auto"))
3209731dc9dfSTyler Hicks cpu_mitigations = CPU_MITIGATIONS_AUTO;
3210731dc9dfSTyler Hicks else if (!strcmp(arg, "auto,nosmt"))
3211731dc9dfSTyler Hicks cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
3212731dc9dfSTyler Hicks else
3213731dc9dfSTyler Hicks pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
3214731dc9dfSTyler Hicks arg);
3215731dc9dfSTyler Hicks
3216731dc9dfSTyler Hicks return 0;
3217731dc9dfSTyler Hicks }
3218ce0abef6SSean Christopherson
3219ce0abef6SSean Christopherson /* mitigations=off */
cpu_mitigations_off(void)3220ce0abef6SSean Christopherson bool cpu_mitigations_off(void)
3221ce0abef6SSean Christopherson {
3222ce0abef6SSean Christopherson return cpu_mitigations == CPU_MITIGATIONS_OFF;
3223ce0abef6SSean Christopherson }
3224ce0abef6SSean Christopherson EXPORT_SYMBOL_GPL(cpu_mitigations_off);
3225ce0abef6SSean Christopherson
3226 /* mitigations=auto,nosmt */
cpu_mitigations_auto_nosmt(void)3227 bool cpu_mitigations_auto_nosmt(void)
3228 {
3229 return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
3230 }
3231 EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
3232 #else
mitigations_parse_cmdline(char * arg)3233 static int __init mitigations_parse_cmdline(char *arg)
3234 {
3235 pr_crit("Kernel compiled without mitigations, ignoring 'mitigations'; system may still be vulnerable\n");
3236 return 0;
3237 }
3238 #endif
3239 early_param("mitigations", mitigations_parse_cmdline);
3240