xref: /linux/kernel/cpu.c (revision cb2e1c2136f71618142557ceca3a8802e87a44cd)
1  /* CPU control.
2   * (C) 2001, 2002, 2003, 2004 Rusty Russell
3   *
4   * This code is licensed under the GPL.
5   */
6  #include <linux/sched/mm.h>
7  #include <linux/proc_fs.h>
8  #include <linux/smp.h>
9  #include <linux/init.h>
10  #include <linux/notifier.h>
11  #include <linux/sched/signal.h>
12  #include <linux/sched/hotplug.h>
13  #include <linux/sched/isolation.h>
14  #include <linux/sched/task.h>
15  #include <linux/sched/smt.h>
16  #include <linux/unistd.h>
17  #include <linux/cpu.h>
18  #include <linux/oom.h>
19  #include <linux/rcupdate.h>
20  #include <linux/delay.h>
21  #include <linux/export.h>
22  #include <linux/bug.h>
23  #include <linux/kthread.h>
24  #include <linux/stop_machine.h>
25  #include <linux/mutex.h>
26  #include <linux/gfp.h>
27  #include <linux/suspend.h>
28  #include <linux/lockdep.h>
29  #include <linux/tick.h>
30  #include <linux/irq.h>
31  #include <linux/nmi.h>
32  #include <linux/smpboot.h>
33  #include <linux/relay.h>
34  #include <linux/slab.h>
35  #include <linux/scs.h>
36  #include <linux/percpu-rwsem.h>
37  #include <linux/cpuset.h>
38  #include <linux/random.h>
39  #include <linux/cc_platform.h>
40  
41  #include <trace/events/power.h>
42  #define CREATE_TRACE_POINTS
43  #include <trace/events/cpuhp.h>
44  
45  #include "smpboot.h"
46  
47  /**
48   * struct cpuhp_cpu_state - Per cpu hotplug state storage
49   * @state:	The current cpu state
50   * @target:	The target state
51   * @fail:	Current CPU hotplug callback state
52   * @thread:	Pointer to the hotplug thread
53   * @should_run:	Thread should execute
54   * @rollback:	Perform a rollback
55   * @single:	Single callback invocation
56   * @bringup:	Single callback bringup or teardown selector
57   * @node:	Remote CPU node; for multi-instance, do a
58   *		single entry callback for install/remove
59   * @last:	For multi-instance rollback, remember how far we got
60   * @cb_state:	The state for a single callback (install/uninstall)
61   * @result:	Result of the operation
62   * @ap_sync_state:	State for AP synchronization
63   * @done_up:	Signal completion to the issuer of the task for cpu-up
64   * @done_down:	Signal completion to the issuer of the task for cpu-down
65   */
66  struct cpuhp_cpu_state {
67  	enum cpuhp_state	state;
68  	enum cpuhp_state	target;
69  	enum cpuhp_state	fail;
70  #ifdef CONFIG_SMP
71  	struct task_struct	*thread;
72  	bool			should_run;
73  	bool			rollback;
74  	bool			single;
75  	bool			bringup;
76  	struct hlist_node	*node;
77  	struct hlist_node	*last;
78  	enum cpuhp_state	cb_state;
79  	int			result;
80  	atomic_t		ap_sync_state;
81  	struct completion	done_up;
82  	struct completion	done_down;
83  #endif
84  };
85  
86  static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
87  	.fail = CPUHP_INVALID,
88  };
89  
90  #ifdef CONFIG_SMP
91  cpumask_t cpus_booted_once_mask;
92  #endif
93  
94  #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
95  static struct lockdep_map cpuhp_state_up_map =
96  	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
97  static struct lockdep_map cpuhp_state_down_map =
98  	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
99  
100  
101  static inline void cpuhp_lock_acquire(bool bringup)
102  {
103  	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
104  }
105  
106  static inline void cpuhp_lock_release(bool bringup)
107  {
108  	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
109  }
110  #else
111  
112  static inline void cpuhp_lock_acquire(bool bringup) { }
113  static inline void cpuhp_lock_release(bool bringup) { }
114  
115  #endif
116  
117  /**
118   * struct cpuhp_step - Hotplug state machine step
119   * @name:	Name of the step
120   * @startup:	Startup function of the step
121   * @teardown:	Teardown function of the step
122   * @cant_stop:	Bringup/teardown can't be stopped at this step
123   * @multi_instance:	State has multiple instances which get added afterwards
124   */
125  struct cpuhp_step {
126  	const char		*name;
127  	union {
128  		int		(*single)(unsigned int cpu);
129  		int		(*multi)(unsigned int cpu,
130  					 struct hlist_node *node);
131  	} startup;
132  	union {
133  		int		(*single)(unsigned int cpu);
134  		int		(*multi)(unsigned int cpu,
135  					 struct hlist_node *node);
136  	} teardown;
137  	/* private: */
138  	struct hlist_head	list;
139  	/* public: */
140  	bool			cant_stop;
141  	bool			multi_instance;
142  };
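
/*
 * A minimal usage sketch (not from this file; the my_* names and the
 * "my/driver:online" string are illustrative) of how a multi-instance step
 * is normally populated through the wrappers in <linux/cpuhotplug.h>: the
 * state is installed once with per-instance callbacks, and each instance
 * contributes the hlist_node which ends up on cpuhp_step::list above.
 *
 *	struct my_ctx {
 *		struct hlist_node node;
 *	};
 *
 *	static int my_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct my_ctx *ctx = hlist_entry(node, struct my_ctx, node);
 *
 *		return my_ctx_setup_on(ctx, cpu);	// hypothetical helper
 *	}
 *
 *	static int my_offline(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct my_ctx *ctx = hlist_entry(node, struct my_ctx, node);
 *
 *		return my_ctx_teardown_on(ctx, cpu);	// hypothetical helper
 *	}
 *
 *	state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "my/driver:online",
 *					my_online, my_offline);
 *	...
 *	ret = cpuhp_state_add_instance(state, &ctx->node);
 */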
143  
144  static DEFINE_MUTEX(cpuhp_state_mutex);
145  static struct cpuhp_step cpuhp_hp_states[];
146  
147  static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
148  {
149  	return cpuhp_hp_states + state;
150  }
151  
152  static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
153  {
154  	return bringup ? !step->startup.single : !step->teardown.single;
155  }
156  
157  /**
158   * cpuhp_invoke_callback - Invoke the callbacks for a given state
159   * @cpu:	The cpu for which the callback should be invoked
160   * @state:	The state to do callbacks for
161   * @bringup:	True if the bringup callback should be invoked
162   * @node:	For multi-instance, do a single entry callback for install/remove
163   * @lastp:	For multi-instance rollback, remember how far we got
164   *
165   * Called from cpu hotplug and from the state register machinery.
166   *
167   * Return: %0 on success or a negative errno code
168   */
169  static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
170  				 bool bringup, struct hlist_node *node,
171  				 struct hlist_node **lastp)
172  {
173  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
174  	struct cpuhp_step *step = cpuhp_get_step(state);
175  	int (*cbm)(unsigned int cpu, struct hlist_node *node);
176  	int (*cb)(unsigned int cpu);
177  	int ret, cnt;
178  
179  	if (st->fail == state) {
180  		st->fail = CPUHP_INVALID;
181  		return -EAGAIN;
182  	}
183  
184  	if (cpuhp_step_empty(bringup, step)) {
185  		WARN_ON_ONCE(1);
186  		return 0;
187  	}
188  
189  	if (!step->multi_instance) {
190  		WARN_ON_ONCE(lastp && *lastp);
191  		cb = bringup ? step->startup.single : step->teardown.single;
192  
193  		trace_cpuhp_enter(cpu, st->target, state, cb);
194  		ret = cb(cpu);
195  		trace_cpuhp_exit(cpu, st->state, state, ret);
196  		return ret;
197  	}
198  	cbm = bringup ? step->startup.multi : step->teardown.multi;
199  
200  	/* Single invocation for instance add/remove */
201  	if (node) {
202  		WARN_ON_ONCE(lastp && *lastp);
203  		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
204  		ret = cbm(cpu, node);
205  		trace_cpuhp_exit(cpu, st->state, state, ret);
206  		return ret;
207  	}
208  
209  	/* State transition. Invoke on all instances */
210  	cnt = 0;
211  	hlist_for_each(node, &step->list) {
212  		if (lastp && node == *lastp)
213  			break;
214  
215  		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
216  		ret = cbm(cpu, node);
217  		trace_cpuhp_exit(cpu, st->state, state, ret);
218  		if (ret) {
219  			if (!lastp)
220  				goto err;
221  
222  			*lastp = node;
223  			return ret;
224  		}
225  		cnt++;
226  	}
227  	if (lastp)
228  		*lastp = NULL;
229  	return 0;
230  err:
231  	/* Rollback the instances if one failed */
232  	cbm = !bringup ? step->startup.multi : step->teardown.multi;
233  	if (!cbm)
234  		return ret;
235  
236  	hlist_for_each(node, &step->list) {
237  		if (!cnt--)
238  			break;
239  
240  		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
241  		ret = cbm(cpu, node);
242  		trace_cpuhp_exit(cpu, st->state, state, ret);
243  		/*
244  		 * Rollback must not fail.
245  		 */
246  		WARN_ON_ONCE(ret);
247  	}
248  	return ret;
249  }
250  
251  #ifdef CONFIG_SMP
252  static bool cpuhp_is_ap_state(enum cpuhp_state state)
253  {
254  	/*
255  	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
256  	 * purposes as that state is handled explicitly in cpu_down.
257  	 */
258  	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
259  }
260  
261  static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
262  {
263  	struct completion *done = bringup ? &st->done_up : &st->done_down;
264  	wait_for_completion(done);
265  }
266  
267  static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
268  {
269  	struct completion *done = bringup ? &st->done_up : &st->done_down;
270  	complete(done);
271  }
272  
273  /*
274   * The former STARTING/DYING states run with IRQs disabled and must not fail.
275   */
276  static bool cpuhp_is_atomic_state(enum cpuhp_state state)
277  {
278  	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
279  }
280  
281  /* Synchronization state management */
282  enum cpuhp_sync_state {
283  	SYNC_STATE_DEAD,
284  	SYNC_STATE_KICKED,
285  	SYNC_STATE_SHOULD_DIE,
286  	SYNC_STATE_ALIVE,
287  	SYNC_STATE_SHOULD_ONLINE,
288  	SYNC_STATE_ONLINE,
289  };
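
/*
 * Ordering used by the code below (the bringup handshake requires
 * CONFIG_HOTPLUG_CORE_SYNC_FULL, the teardown handshake
 * CONFIG_HOTPLUG_CORE_SYNC_DEAD):
 *
 *   bringup:  SYNC_STATE_KICKED        (control CPU, cpuhp_can_boot_ap())
 *          -> SYNC_STATE_ALIVE         (AP, cpuhp_ap_sync_alive())
 *          -> SYNC_STATE_SHOULD_ONLINE (control CPU, cpuhp_bp_sync_alive())
 *          -> SYNC_STATE_ONLINE        (AP, cpuhp_online_idle())
 *
 *   teardown: SYNC_STATE_SHOULD_DIE    (control CPU, cpuhp_bp_sync_dead())
 *          -> SYNC_STATE_DEAD          (AP, cpuhp_ap_report_dead())
 */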
290  
291  #ifdef CONFIG_HOTPLUG_CORE_SYNC
292  /**
293   * cpuhp_ap_update_sync_state - Update synchronization state during bringup/teardown
294   * @state:	The synchronization state to set
295   *
296   * No synchronization point. Just update of the synchronization state, but implies
297   * a full barrier so that the AP changes are visible before the control CPU proceeds.
298   */
299  static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state)
300  {
301  	atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);
302  
303  	(void)atomic_xchg(st, state);
304  }
305  
306  void __weak arch_cpuhp_sync_state_poll(void) { cpu_relax(); }
307  
308  static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state state,
309  				      enum cpuhp_sync_state next_state)
310  {
311  	atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
312  	ktime_t now, end, start = ktime_get();
313  	int sync;
314  
315  	end = start + 10ULL * NSEC_PER_SEC;
316  
317  	sync = atomic_read(st);
318  	while (1) {
319  		if (sync == state) {
320  			if (!atomic_try_cmpxchg(st, &sync, next_state))
321  				continue;
322  			return true;
323  		}
324  
325  		now = ktime_get();
326  		if (now > end) {
327  			/* Timeout. Leave the state unchanged */
328  			return false;
329  		} else if (now - start < NSEC_PER_MSEC) {
330  			/* Poll for one millisecond */
331  			arch_cpuhp_sync_state_poll();
332  		} else {
333  			usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
334  		}
335  		sync = atomic_read(st);
336  	}
337  	return true;
338  }
339  #else  /* CONFIG_HOTPLUG_CORE_SYNC */
340  static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state) { }
341  #endif /* !CONFIG_HOTPLUG_CORE_SYNC */
342  
343  #ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
344  /**
345   * cpuhp_ap_report_dead - Update synchronization state to DEAD
346   *
347   * No synchronization point. Just update of the synchronization state.
348   */
349  void cpuhp_ap_report_dead(void)
350  {
351  	cpuhp_ap_update_sync_state(SYNC_STATE_DEAD);
352  }
353  
354  void __weak arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { }
355  
356  /*
357   * Late CPU shutdown synchronization point. Cannot use cpuhp_state::done_down
358   * because the AP cannot issue complete() at this stage.
359   */
360  static void cpuhp_bp_sync_dead(unsigned int cpu)
361  {
362  	atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
363  	int sync = atomic_read(st);
364  
365  	do {
366  		/* CPU can have reported dead already. Don't overwrite that! */
367  		if (sync == SYNC_STATE_DEAD)
368  			break;
369  	} while (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_SHOULD_DIE));
370  
371  	if (cpuhp_wait_for_sync_state(cpu, SYNC_STATE_DEAD, SYNC_STATE_DEAD)) {
372  		/* CPU reached dead state. Invoke the cleanup function */
373  		arch_cpuhp_cleanup_dead_cpu(cpu);
374  		return;
375  	}
376  
377  	/* No further action possible. Emit message and give up. */
378  	pr_err("CPU%u failed to report dead state\n", cpu);
379  }
380  #else /* CONFIG_HOTPLUG_CORE_SYNC_DEAD */
381  static inline void cpuhp_bp_sync_dead(unsigned int cpu) { }
382  #endif /* !CONFIG_HOTPLUG_CORE_SYNC_DEAD */
383  
384  #ifdef CONFIG_HOTPLUG_CORE_SYNC_FULL
385  /**
386   * cpuhp_ap_sync_alive - Synchronize AP with the control CPU once it is alive
387   *
388   * Updates the AP synchronization state to SYNC_STATE_ALIVE and waits
389   * for the BP to release it.
390   */
391  void cpuhp_ap_sync_alive(void)
392  {
393  	atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);
394  
395  	cpuhp_ap_update_sync_state(SYNC_STATE_ALIVE);
396  
397  	/* Wait for the control CPU to release it. */
398  	while (atomic_read(st) != SYNC_STATE_SHOULD_ONLINE)
399  		cpu_relax();
400  }
401  
402  static bool cpuhp_can_boot_ap(unsigned int cpu)
403  {
404  	atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
405  	int sync = atomic_read(st);
406  
407  again:
408  	switch (sync) {
409  	case SYNC_STATE_DEAD:
410  		/* CPU is properly dead */
411  		break;
412  	case SYNC_STATE_KICKED:
413  		/* CPU did not come up in previous attempt */
414  		break;
415  	case SYNC_STATE_ALIVE:
416  		/* CPU is stuck in cpuhp_ap_sync_alive(). */
417  		break;
418  	default:
419  		/* CPU failed to report online or dead and is in limbo state. */
420  		return false;
421  	}
422  
423  	/* Prepare for booting */
424  	if (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_KICKED))
425  		goto again;
426  
427  	return true;
428  }
429  
430  void __weak arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) { }
431  
432  /*
433   * Early CPU bringup synchronization point. Cannot use cpuhp_state::done_up
434   * because the AP cannot issue complete() so early in the bringup.
435   */
436  static int cpuhp_bp_sync_alive(unsigned int cpu)
437  {
438  	int ret = 0;
439  
440  	if (!IS_ENABLED(CONFIG_HOTPLUG_CORE_SYNC_FULL))
441  		return 0;
442  
443  	if (!cpuhp_wait_for_sync_state(cpu, SYNC_STATE_ALIVE, SYNC_STATE_SHOULD_ONLINE)) {
444  		pr_err("CPU%u failed to report alive state\n", cpu);
445  		ret = -EIO;
446  	}
447  
448  	/* Let the architecture clean up the kick-alive mechanics. */
449  	arch_cpuhp_cleanup_kick_cpu(cpu);
450  	return ret;
451  }
452  #else /* CONFIG_HOTPLUG_CORE_SYNC_FULL */
453  static inline int cpuhp_bp_sync_alive(unsigned int cpu) { return 0; }
454  static inline bool cpuhp_can_boot_ap(unsigned int cpu) { return true; }
455  #endif /* !CONFIG_HOTPLUG_CORE_SYNC_FULL */
456  
457  /* Serializes the updates to cpu_online_mask, cpu_present_mask */
458  static DEFINE_MUTEX(cpu_add_remove_lock);
459  bool cpuhp_tasks_frozen;
460  EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
461  
462  /*
463   * The following two APIs (cpu_maps_update_begin/done) must be used when
464   * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
465   */
466  void cpu_maps_update_begin(void)
467  {
468  	mutex_lock(&cpu_add_remove_lock);
469  }
470  
471  void cpu_maps_update_done(void)
472  {
473  	mutex_unlock(&cpu_add_remove_lock);
474  }
475  
476  /*
477   * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
478   * Should always be manipulated under cpu_add_remove_lock
479   */
480  static int cpu_hotplug_disabled;
481  
482  #ifdef CONFIG_HOTPLUG_CPU
483  
484  DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
485  
486  static bool cpu_hotplug_offline_disabled __ro_after_init;
487  
488  void cpus_read_lock(void)
489  {
490  	percpu_down_read(&cpu_hotplug_lock);
491  }
492  EXPORT_SYMBOL_GPL(cpus_read_lock);
493  
494  int cpus_read_trylock(void)
495  {
496  	return percpu_down_read_trylock(&cpu_hotplug_lock);
497  }
498  EXPORT_SYMBOL_GPL(cpus_read_trylock);
499  
500  void cpus_read_unlock(void)
501  {
502  	percpu_up_read(&cpu_hotplug_lock);
503  }
504  EXPORT_SYMBOL_GPL(cpus_read_unlock);
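
/*
 * A minimal usage sketch for callers elsewhere in the tree: hold the read
 * side of cpu_hotplug_lock around a walk of the online CPUs so that none of
 * them can be plugged or unplugged underneath the loop. my_per_cpu_init()
 * is a made-up placeholder.
 *
 *	cpus_read_lock();
 *	for_each_online_cpu(cpu)
 *		my_per_cpu_init(cpu);
 *	cpus_read_unlock();
 */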
505  
506  void cpus_write_lock(void)
507  {
508  	percpu_down_write(&cpu_hotplug_lock);
509  }
510  
511  void cpus_write_unlock(void)
512  {
513  	percpu_up_write(&cpu_hotplug_lock);
514  }
515  
516  void lockdep_assert_cpus_held(void)
517  {
518  	/*
519  	 * We can't have hotplug operations before userspace starts running,
520  	 * and some init codepaths will knowingly not take the hotplug lock.
521  	 * This is all valid, so mute lockdep until it makes sense to report
522  	 * unheld locks.
523  	 */
524  	if (system_state < SYSTEM_RUNNING)
525  		return;
526  
527  	percpu_rwsem_assert_held(&cpu_hotplug_lock);
528  }
529  
530  #ifdef CONFIG_LOCKDEP
531  int lockdep_is_cpus_held(void)
532  {
533  	return percpu_rwsem_is_held(&cpu_hotplug_lock);
534  }
535  #endif
536  
537  static void lockdep_acquire_cpus_lock(void)
538  {
539  	rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
540  }
541  
542  static void lockdep_release_cpus_lock(void)
543  {
544  	rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
545  }
546  
547  /* Declare CPU offlining not supported */
548  void cpu_hotplug_disable_offlining(void)
549  {
550  	cpu_maps_update_begin();
551  	cpu_hotplug_offline_disabled = true;
552  	cpu_maps_update_done();
553  }
554  
555  /*
556   * Wait for currently running CPU hotplug operations to complete (if any) and
557   * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
558   * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
559   * hotplug path before performing hotplug operations. So acquiring that lock
560   * guarantees mutual exclusion from any currently running hotplug operations.
561   */
562  void cpu_hotplug_disable(void)
563  {
564  	cpu_maps_update_begin();
565  	cpu_hotplug_disabled++;
566  	cpu_maps_update_done();
567  }
568  EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
569  
570  static void __cpu_hotplug_enable(void)
571  {
572  	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
573  		return;
574  	cpu_hotplug_disabled--;
575  }
576  
577  void cpu_hotplug_enable(void)
578  {
579  	cpu_maps_update_begin();
580  	__cpu_hotplug_enable();
581  	cpu_maps_update_done();
582  }
583  EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
584  
585  #else
586  
587  static void lockdep_acquire_cpus_lock(void)
588  {
589  }
590  
591  static void lockdep_release_cpus_lock(void)
592  {
593  }
594  
595  #endif	/* CONFIG_HOTPLUG_CPU */
596  
597  /*
598   * Architectures that need SMT-specific errata handling during SMT hotplug
599   * should override this.
600   */
601  void __weak arch_smt_update(void) { }
602  
603  #ifdef CONFIG_HOTPLUG_SMT
604  
605  enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
606  static unsigned int cpu_smt_max_threads __ro_after_init;
607  unsigned int cpu_smt_num_threads __read_mostly = UINT_MAX;
608  
609  void __init cpu_smt_disable(bool force)
610  {
611  	if (!cpu_smt_possible())
612  		return;
613  
614  	if (force) {
615  		pr_info("SMT: Force disabled\n");
616  		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
617  	} else {
618  		pr_info("SMT: disabled\n");
619  		cpu_smt_control = CPU_SMT_DISABLED;
620  	}
621  	cpu_smt_num_threads = 1;
622  }
623  
624  /*
625   * The decision whether SMT is supported can only be made after full
626   * CPU identification. Called from architecture code.
627   */
628  void __init cpu_smt_set_num_threads(unsigned int num_threads,
629  				    unsigned int max_threads)
630  {
631  	WARN_ON(!num_threads || (num_threads > max_threads));
632  
633  	if (max_threads == 1)
634  		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
635  
636  	cpu_smt_max_threads = max_threads;
637  
638  	/*
639  	 * If SMT has been disabled via the kernel command line or SMT is
640  	 * not supported, set cpu_smt_num_threads to 1 for consistency.
641  	 * If enabled, take the architecture-requested number of threads
642  	 * to bring up into account.
643  	 */
644  	if (cpu_smt_control != CPU_SMT_ENABLED)
645  		cpu_smt_num_threads = 1;
646  	else if (num_threads < cpu_smt_num_threads)
647  		cpu_smt_num_threads = num_threads;
648  }
649  
650  static int __init smt_cmdline_disable(char *str)
651  {
652  	cpu_smt_disable(str && !strcmp(str, "force"));
653  	return 0;
654  }
655  early_param("nosmt", smt_cmdline_disable);
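
/*
 * Command line forms parsed by smt_cmdline_disable():
 *
 *	nosmt		- sets CPU_SMT_DISABLED
 *	nosmt=force	- sets CPU_SMT_FORCE_DISABLED; cpu_smt_possible()
 *			  subsequently reports SMT as unavailable
 */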
656  
657  /*
658   * For architectures supporting partial SMT states, check whether the thread is allowed.
659   * Otherwise this has already been checked through cpu_smt_max_threads when
660   * setting the SMT level.
661   */
662  static inline bool cpu_smt_thread_allowed(unsigned int cpu)
663  {
664  #ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC
665  	return topology_smt_thread_allowed(cpu);
666  #else
667  	return true;
668  #endif
669  }
670  
671  static inline bool cpu_bootable(unsigned int cpu)
672  {
673  	if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
674  		return true;
675  
676  	/* All CPUs are bootable if controls are not configured */
677  	if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED)
678  		return true;
679  
680  	/* All CPUs are bootable if CPU is not SMT capable */
681  	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
682  		return true;
683  
684  	if (topology_is_primary_thread(cpu))
685  		return true;
686  
687  	/*
688  	 * On x86 it's required to boot all logical CPUs at least once so
689  	 * that the init code can get a chance to set CR4.MCE on each
690  	 * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
691  	 * core will shut down the machine.
692  	 */
693  	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
694  }
695  
696  /* Returns true if SMT is supported and not forcefully (irreversibly) disabled */
697  bool cpu_smt_possible(void)
698  {
699  	return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
700  		cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
701  }
702  EXPORT_SYMBOL_GPL(cpu_smt_possible);
703  
704  #else
705  static inline bool cpu_bootable(unsigned int cpu) { return true; }
706  #endif
707  
708  static inline enum cpuhp_state
709  cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
710  {
711  	enum cpuhp_state prev_state = st->state;
712  	bool bringup = st->state < target;
713  
714  	st->rollback = false;
715  	st->last = NULL;
716  
717  	st->target = target;
718  	st->single = false;
719  	st->bringup = bringup;
720  	if (cpu_dying(cpu) != !bringup)
721  		set_cpu_dying(cpu, !bringup);
722  
723  	return prev_state;
724  }
725  
726  static inline void
727  cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
728  		  enum cpuhp_state prev_state)
729  {
730  	bool bringup = !st->bringup;
731  
732  	st->target = prev_state;
733  
734  	/*
735  	 * Already rolling back. No need to invert the bringup value or to change
736  	 * the current state.
737  	 */
738  	if (st->rollback)
739  		return;
740  
741  	st->rollback = true;
742  
743  	/*
744  	 * If we have st->last we need to undo partial multi_instance of this
745  	 * state first. Otherwise start undo at the previous state.
746  	 */
747  	if (!st->last) {
748  		if (st->bringup)
749  			st->state--;
750  		else
751  			st->state++;
752  	}
753  
754  	st->bringup = bringup;
755  	if (cpu_dying(cpu) != !bringup)
756  		set_cpu_dying(cpu, !bringup);
757  }
758  
759  /* Regular hotplug invocation of the AP hotplug thread */
760  static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
761  {
762  	if (!st->single && st->state == st->target)
763  		return;
764  
765  	st->result = 0;
766  	/*
767  	 * Make sure the above stores are visible before should_run becomes
768  	 * true. Paired with the smp_mb() in cpuhp_thread_fun().
769  	 */
770  	smp_mb();
771  	st->should_run = true;
772  	wake_up_process(st->thread);
773  	wait_for_ap_thread(st, st->bringup);
774  }
775  
776  static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
777  			 enum cpuhp_state target)
778  {
779  	enum cpuhp_state prev_state;
780  	int ret;
781  
782  	prev_state = cpuhp_set_state(cpu, st, target);
783  	__cpuhp_kick_ap(st);
784  	if ((ret = st->result)) {
785  		cpuhp_reset_state(cpu, st, prev_state);
786  		__cpuhp_kick_ap(st);
787  	}
788  
789  	return ret;
790  }
791  
792  static int bringup_wait_for_ap_online(unsigned int cpu)
793  {
794  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
795  
796  	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
797  	wait_for_ap_thread(st, true);
798  	if (WARN_ON_ONCE((!cpu_online(cpu))))
799  		return -ECANCELED;
800  
801  	/* Unpark the hotplug thread of the target cpu */
802  	kthread_unpark(st->thread);
803  
804  	/*
805  	 * SMT soft disabling on X86 requires bringing the CPU out of the
806  	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
807  	 * CPU marked itself as booted_once in notify_cpu_starting() so the
808  	 * cpu_bootable() check will now return false if this is not the
809  	 * primary sibling.
810  	 */
811  	if (!cpu_bootable(cpu))
812  		return -ECANCELED;
813  	return 0;
814  }
815  
816  #ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
817  static int cpuhp_kick_ap_alive(unsigned int cpu)
818  {
819  	if (!cpuhp_can_boot_ap(cpu))
820  		return -EAGAIN;
821  
822  	return arch_cpuhp_kick_ap_alive(cpu, idle_thread_get(cpu));
823  }
824  
825  static int cpuhp_bringup_ap(unsigned int cpu)
826  {
827  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
828  	int ret;
829  
830  	/*
831  	 * Some architectures have to walk the irq descriptors to
832  	 * set up the vector space for the cpu which comes online.
833  	 * Prevent irq alloc/free across the bringup.
834  	 */
835  	irq_lock_sparse();
836  
837  	ret = cpuhp_bp_sync_alive(cpu);
838  	if (ret)
839  		goto out_unlock;
840  
841  	ret = bringup_wait_for_ap_online(cpu);
842  	if (ret)
843  		goto out_unlock;
844  
845  	irq_unlock_sparse();
846  
847  	if (st->target <= CPUHP_AP_ONLINE_IDLE)
848  		return 0;
849  
850  	return cpuhp_kick_ap(cpu, st, st->target);
851  
852  out_unlock:
853  	irq_unlock_sparse();
854  	return ret;
855  }
856  #else
857  static int bringup_cpu(unsigned int cpu)
858  {
859  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
860  	struct task_struct *idle = idle_thread_get(cpu);
861  	int ret;
862  
863  	if (!cpuhp_can_boot_ap(cpu))
864  		return -EAGAIN;
865  
866  	/*
867  	 * Some architectures have to walk the irq descriptors to
868  	 * set up the vector space for the cpu which comes online.
869  	 *
870  	 * Prevent irq alloc/free across the bringup by acquiring the
871  	 * sparse irq lock. Hold it until the upcoming CPU completes the
872  	 * startup in cpuhp_online_idle(), which avoids
873  	 * intermediate synchronization points in the architecture code.
874  	 */
875  	irq_lock_sparse();
876  
877  	ret = __cpu_up(cpu, idle);
878  	if (ret)
879  		goto out_unlock;
880  
881  	ret = cpuhp_bp_sync_alive(cpu);
882  	if (ret)
883  		goto out_unlock;
884  
885  	ret = bringup_wait_for_ap_online(cpu);
886  	if (ret)
887  		goto out_unlock;
888  
889  	irq_unlock_sparse();
890  
891  	if (st->target <= CPUHP_AP_ONLINE_IDLE)
892  		return 0;
893  
894  	return cpuhp_kick_ap(cpu, st, st->target);
895  
896  out_unlock:
897  	irq_unlock_sparse();
898  	return ret;
899  }
900  #endif
901  
902  static int finish_cpu(unsigned int cpu)
903  {
904  	struct task_struct *idle = idle_thread_get(cpu);
905  	struct mm_struct *mm = idle->active_mm;
906  
907  	/*
908  	 * idle_task_exit() will have switched to &init_mm, now
909  	 * clean up any remaining active_mm state.
910  	 */
911  	if (mm != &init_mm)
912  		idle->active_mm = &init_mm;
913  	mmdrop_lazy_tlb(mm);
914  	return 0;
915  }
916  
917  /*
918   * Hotplug state machine related functions
919   */
920  
921  /*
922   * Get the next state to run. Empty ones will be skipped. Returns true if a
923   * state must be run.
924   *
925   * st->state will be modified ahead of time, to match state_to_run, as if it
926   * had already run.
927   */
928  static bool cpuhp_next_state(bool bringup,
929  			     enum cpuhp_state *state_to_run,
930  			     struct cpuhp_cpu_state *st,
931  			     enum cpuhp_state target)
932  {
933  	do {
934  		if (bringup) {
935  			if (st->state >= target)
936  				return false;
937  
938  			*state_to_run = ++st->state;
939  		} else {
940  			if (st->state <= target)
941  				return false;
942  
943  			*state_to_run = st->state--;
944  		}
945  
946  		if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
947  			break;
948  	} while (true);
949  
950  	return true;
951  }
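
/*
 * Example: with st->state == CPUHP_OFFLINE and target == CPUHP_BRINGUP_CPU,
 * successive calls return each non-empty state above CPUHP_OFFLINE up to and
 * including CPUHP_BRINGUP_CPU, incrementing st->state before the callback
 * runs. For teardown the current state is returned first and st->state is
 * decremented, so a failing callback leaves st->state pointing at the state
 * below the one that failed.
 */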
952  
953  static int __cpuhp_invoke_callback_range(bool bringup,
954  					 unsigned int cpu,
955  					 struct cpuhp_cpu_state *st,
956  					 enum cpuhp_state target,
957  					 bool nofail)
958  {
959  	enum cpuhp_state state;
960  	int ret = 0;
961  
962  	while (cpuhp_next_state(bringup, &state, st, target)) {
963  		int err;
964  
965  		err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
966  		if (!err)
967  			continue;
968  
969  		if (nofail) {
970  			pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
971  				cpu, bringup ? "UP" : "DOWN",
972  				cpuhp_get_step(st->state)->name,
973  				st->state, err);
974  			ret = -1;
975  		} else {
976  			ret = err;
977  			break;
978  		}
979  	}
980  
981  	return ret;
982  }
983  
984  static inline int cpuhp_invoke_callback_range(bool bringup,
985  					      unsigned int cpu,
986  					      struct cpuhp_cpu_state *st,
987  					      enum cpuhp_state target)
988  {
989  	return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
990  }
991  
992  static inline void cpuhp_invoke_callback_range_nofail(bool bringup,
993  						      unsigned int cpu,
994  						      struct cpuhp_cpu_state *st,
995  						      enum cpuhp_state target)
996  {
997  	__cpuhp_invoke_callback_range(bringup, cpu, st, target, true);
998  }
999  
1000  static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
1001  {
1002  	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
1003  		return true;
1004  	/*
1005  	 * When CPU hotplug is disabled, then taking the CPU down is not
1006  	 * possible because takedown_cpu() and the architecture and
1007  	 * subsystem specific mechanisms are not available. So the CPU
1008  	 * which would be completely unplugged again needs to stay around
1009  	 * in the current state.
1010  	 */
1011  	return st->state <= CPUHP_BRINGUP_CPU;
1012  }
1013  
1014  static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
1015  			      enum cpuhp_state target)
1016  {
1017  	enum cpuhp_state prev_state = st->state;
1018  	int ret = 0;
1019  
1020  	ret = cpuhp_invoke_callback_range(true, cpu, st, target);
1021  	if (ret) {
1022  		pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",
1023  			 ret, cpu, cpuhp_get_step(st->state)->name,
1024  			 st->state);
1025  
1026  		cpuhp_reset_state(cpu, st, prev_state);
1027  		if (can_rollback_cpu(st))
1028  			WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
1029  							    prev_state));
1030  	}
1031  	return ret;
1032  }
1033  
1034  /*
1035   * The cpu hotplug threads manage the bringup and teardown of the cpus
1036   */
1037  static int cpuhp_should_run(unsigned int cpu)
1038  {
1039  	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1040  
1041  	return st->should_run;
1042  }
1043  
1044  /*
1045   * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
1046   * callbacks when a state gets [un]installed at runtime.
1047   *
1048   * Each invocation of this function by the smpboot thread does a single AP
1049   * state callback.
1050   *
1051   * It has 3 modes of operation:
1052   *  - single: runs st->cb_state
1053   *  - up:     runs ++st->state, while st->state < st->target
1054   *  - down:   runs st->state--, while st->state > st->target
1055   *
1056   * When complete or on error, should_run is cleared and the completion is fired.
1057   */
1058  static void cpuhp_thread_fun(unsigned int cpu)
1059  {
1060  	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1061  	bool bringup = st->bringup;
1062  	enum cpuhp_state state;
1063  
1064  	if (WARN_ON_ONCE(!st->should_run))
1065  		return;
1066  
1067  	/*
1068  	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
1069  	 * that if we see ->should_run we also see the rest of the state.
1070  	 */
1071  	smp_mb();
1072  
1073  	/*
1074  	 * The BP holds the hotplug lock, but we're now running on the AP;
1075  	 * ensure that anybody asserting that the lock is held will actually
1076  	 * find it so.
1077  	 */
1078  	lockdep_acquire_cpus_lock();
1079  	cpuhp_lock_acquire(bringup);
1080  
1081  	if (st->single) {
1082  		state = st->cb_state;
1083  		st->should_run = false;
1084  	} else {
1085  		st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
1086  		if (!st->should_run)
1087  			goto end;
1088  	}
1089  
1090  	WARN_ON_ONCE(!cpuhp_is_ap_state(state));
1091  
1092  	if (cpuhp_is_atomic_state(state)) {
1093  		local_irq_disable();
1094  		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
1095  		local_irq_enable();
1096  
1097  		/*
1098  		 * STARTING/DYING must not fail!
1099  		 */
1100  		WARN_ON_ONCE(st->result);
1101  	} else {
1102  		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
1103  	}
1104  
1105  	if (st->result) {
1106  		/*
1107  		 * If we fail on a rollback, we're up a creek without a
1108  		 * paddle, no way forward, no way back. We lose, thanks for
1109  		 * playing.
1110  		 */
1111  		WARN_ON_ONCE(st->rollback);
1112  		st->should_run = false;
1113  	}
1114  
1115  end:
1116  	cpuhp_lock_release(bringup);
1117  	lockdep_release_cpus_lock();
1118  
1119  	if (!st->should_run)
1120  		complete_ap_thread(st, bringup);
1121  }
1122  
1123  /* Invoke a single callback on a remote cpu */
1124  static int
1125  cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
1126  			 struct hlist_node *node)
1127  {
1128  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1129  	int ret;
1130  
1131  	if (!cpu_online(cpu))
1132  		return 0;
1133  
1134  	cpuhp_lock_acquire(false);
1135  	cpuhp_lock_release(false);
1136  
1137  	cpuhp_lock_acquire(true);
1138  	cpuhp_lock_release(true);
1139  
1140  	/*
1141  	 * If we are up and running, use the hotplug thread. For early calls
1142  	 * we invoke the thread function directly.
1143  	 */
1144  	if (!st->thread)
1145  		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1146  
1147  	st->rollback = false;
1148  	st->last = NULL;
1149  
1150  	st->node = node;
1151  	st->bringup = bringup;
1152  	st->cb_state = state;
1153  	st->single = true;
1154  
1155  	__cpuhp_kick_ap(st);
1156  
1157  	/*
1158  	 * If we failed and did a partial, do a rollback.
1159  	 */
1160  	if ((ret = st->result) && st->last) {
1161  		st->rollback = true;
1162  		st->bringup = !bringup;
1163  
1164  		__cpuhp_kick_ap(st);
1165  	}
1166  
1167  	/*
1168  	 * Clean up the leftovers so the next hotplug operation won't use stale
1169  	 * data.
1170  	 */
1171  	st->node = st->last = NULL;
1172  	return ret;
1173  }
1174  
1175  static int cpuhp_kick_ap_work(unsigned int cpu)
1176  {
1177  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1178  	enum cpuhp_state prev_state = st->state;
1179  	int ret;
1180  
1181  	cpuhp_lock_acquire(false);
1182  	cpuhp_lock_release(false);
1183  
1184  	cpuhp_lock_acquire(true);
1185  	cpuhp_lock_release(true);
1186  
1187  	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
1188  	ret = cpuhp_kick_ap(cpu, st, st->target);
1189  	trace_cpuhp_exit(cpu, st->state, prev_state, ret);
1190  
1191  	return ret;
1192  }
1193  
1194  static struct smp_hotplug_thread cpuhp_threads = {
1195  	.store			= &cpuhp_state.thread,
1196  	.thread_should_run	= cpuhp_should_run,
1197  	.thread_fn		= cpuhp_thread_fun,
1198  	.thread_comm		= "cpuhp/%u",
1199  	.selfparking		= true,
1200  };
1201  
1202  static __init void cpuhp_init_state(void)
1203  {
1204  	struct cpuhp_cpu_state *st;
1205  	int cpu;
1206  
1207  	for_each_possible_cpu(cpu) {
1208  		st = per_cpu_ptr(&cpuhp_state, cpu);
1209  		init_completion(&st->done_up);
1210  		init_completion(&st->done_down);
1211  	}
1212  }
1213  
1214  void __init cpuhp_threads_init(void)
1215  {
1216  	cpuhp_init_state();
1217  	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
1218  	kthread_unpark(this_cpu_read(cpuhp_state.thread));
1219  }
1220  
1221  #ifdef CONFIG_HOTPLUG_CPU
1222  #ifndef arch_clear_mm_cpumask_cpu
1223  #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
1224  #endif
1225  
1226  /**
1227   * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
1228   * @cpu: a CPU id
1229   *
1230   * This function walks all processes, finds a valid mm struct for each one and
1231   * then clears a corresponding bit in mm's cpumask.  While this all sounds
1232   * trivial, there are various non-obvious corner cases, which this function
1233   * tries to solve in a safe manner.
1234   *
1235   * Also note that the function uses a somewhat relaxed locking scheme, so it may
1236   * be called only for an already offlined CPU.
1237   */
1238  void clear_tasks_mm_cpumask(int cpu)
1239  {
1240  	struct task_struct *p;
1241  
1242  	/*
1243  	 * This function is called after the cpu is taken down and marked
1244  	 * offline, so it's not like new tasks will ever get this cpu set in
1245  	 * their mm mask. -- Peter Zijlstra
1246  	 * Thus, we may use rcu_read_lock() here, instead of grabbing
1247  	 * full-fledged tasklist_lock.
1248  	 */
1249  	WARN_ON(cpu_online(cpu));
1250  	rcu_read_lock();
1251  	for_each_process(p) {
1252  		struct task_struct *t;
1253  
1254  		/*
1255  		 * Main thread might exit, but other threads may still have
1256  		 * a valid mm. Find one.
1257  		 */
1258  		t = find_lock_task_mm(p);
1259  		if (!t)
1260  			continue;
1261  		arch_clear_mm_cpumask_cpu(cpu, t->mm);
1262  		task_unlock(t);
1263  	}
1264  	rcu_read_unlock();
1265  }
1266  
1267  /* Take this CPU down. */
1268  static int take_cpu_down(void *_param)
1269  {
1270  	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1271  	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
1272  	int err, cpu = smp_processor_id();
1273  
1274  	/* Ensure this CPU doesn't handle any more interrupts. */
1275  	err = __cpu_disable();
1276  	if (err < 0)
1277  		return err;
1278  
1279  	/*
1280  	 * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
1281  	 * down, that the current state is CPUHP_TEARDOWN_CPU - 1.
1282  	 */
1283  	WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));
1284  
1285  	/*
1286  	 * Invoke the former CPU_DYING callbacks. DYING must not fail!
1287  	 */
1288  	cpuhp_invoke_callback_range_nofail(false, cpu, st, target);
1289  
1290  	/* Park the stopper thread */
1291  	stop_machine_park(cpu);
1292  	return 0;
1293  }
1294  
1295  static int takedown_cpu(unsigned int cpu)
1296  {
1297  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1298  	int err;
1299  
1300  	/* Park the smpboot threads */
1301  	kthread_park(st->thread);
1302  
1303  	/*
1304  	 * Prevent irq alloc/free while the dying cpu reorganizes the
1305  	 * interrupt affinities.
1306  	 */
1307  	irq_lock_sparse();
1308  
1309  	/*
1310  	 * So now all preempt/rcu users must observe !cpu_active().
1311  	 */
1312  	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
1313  	if (err) {
1314  		/* CPU refused to die */
1315  		irq_unlock_sparse();
1316  		/* Unpark the hotplug thread so we can rollback there */
1317  		kthread_unpark(st->thread);
1318  		return err;
1319  	}
1320  	BUG_ON(cpu_online(cpu));
1321  
1322  	/*
1323  	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
1324  	 * all runnable tasks from the CPU, there's only the idle task left now
1325  	 * that the migration thread is done doing the stop_machine thing.
1326  	 *
1327  	 * Wait for the stop thread to go away.
1328  	 */
1329  	wait_for_ap_thread(st, false);
1330  	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
1331  
1332  	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
1333  	irq_unlock_sparse();
1334  
1335  	hotplug_cpu__broadcast_tick_pull(cpu);
1336  	/* This actually kills the CPU. */
1337  	__cpu_die(cpu);
1338  
1339  	cpuhp_bp_sync_dead(cpu);
1340  
1341  	lockdep_cleanup_dead_cpu(cpu, idle_thread_get(cpu));
1342  
1343  	/*
1344  	 * Callbacks must be re-integrated right away to the RCU state machine.
1345  	 * Otherwise an RCU callback could block a further teardown function
1346  	 * waiting for its completion.
1347  	 */
1348  	rcutree_migrate_callbacks(cpu);
1349  
1350  	return 0;
1351  }
1352  
1353  static void cpuhp_complete_idle_dead(void *arg)
1354  {
1355  	struct cpuhp_cpu_state *st = arg;
1356  
1357  	complete_ap_thread(st, false);
1358  }
1359  
1360  void cpuhp_report_idle_dead(void)
1361  {
1362  	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1363  
1364  	BUG_ON(st->state != CPUHP_AP_OFFLINE);
1365  	tick_assert_timekeeping_handover();
1366  	rcutree_report_cpu_dead();
1367  	st->state = CPUHP_AP_IDLE_DEAD;
1368  	/*
1369  	 * We cannot call complete after rcutree_report_cpu_dead() so we delegate it
1370  	 * to an online cpu.
1371  	 */
1372  	smp_call_function_single(cpumask_first(cpu_online_mask),
1373  				 cpuhp_complete_idle_dead, st, 0);
1374  }
1375  
1376  static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
1377  				enum cpuhp_state target)
1378  {
1379  	enum cpuhp_state prev_state = st->state;
1380  	int ret = 0;
1381  
1382  	ret = cpuhp_invoke_callback_range(false, cpu, st, target);
1383  	if (ret) {
1384  		pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",
1385  			 ret, cpu, cpuhp_get_step(st->state)->name,
1386  			 st->state);
1387  
1388  		cpuhp_reset_state(cpu, st, prev_state);
1389  
1390  		if (st->state < prev_state)
1391  			WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
1392  							    prev_state));
1393  	}
1394  
1395  	return ret;
1396  }
1397  
1398  /* Requires cpu_add_remove_lock to be held */
1399  static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
1400  			   enum cpuhp_state target)
1401  {
1402  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1403  	int prev_state, ret = 0;
1404  
1405  	if (num_online_cpus() == 1)
1406  		return -EBUSY;
1407  
1408  	if (!cpu_present(cpu))
1409  		return -EINVAL;
1410  
1411  	cpus_write_lock();
1412  
1413  	cpuhp_tasks_frozen = tasks_frozen;
1414  
1415  	prev_state = cpuhp_set_state(cpu, st, target);
1416  	/*
1417  	 * If the current CPU state is in the range of the AP hotplug thread,
1418  	 * then we need to kick the thread.
1419  	 */
1420  	if (st->state > CPUHP_TEARDOWN_CPU) {
1421  		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
1422  		ret = cpuhp_kick_ap_work(cpu);
1423  		/*
1424  		 * The AP side has done the error rollback already. Just
1425  		 * return the error code.
1426  		 */
1427  		if (ret)
1428  			goto out;
1429  
1430  		/*
1431  		 * We might have stopped still in the range of the AP hotplug
1432  		 * thread. Nothing to do anymore.
1433  		 */
1434  		if (st->state > CPUHP_TEARDOWN_CPU)
1435  			goto out;
1436  
1437  		st->target = target;
1438  	}
1439  	/*
1440  	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1441  	 * to do the further cleanups.
1442  	 */
1443  	ret = cpuhp_down_callbacks(cpu, st, target);
1444  	if (ret && st->state < prev_state) {
1445  		if (st->state == CPUHP_TEARDOWN_CPU) {
1446  			cpuhp_reset_state(cpu, st, prev_state);
1447  			__cpuhp_kick_ap(st);
1448  		} else {
1449  			WARN(1, "DEAD callback error for CPU%d", cpu);
1450  		}
1451  	}
1452  
1453  out:
1454  	cpus_write_unlock();
1455  	/*
1456  	 * Do post unplug cleanup. This is still protected against
1457  	 * concurrent CPU hotplug via cpu_add_remove_lock.
1458  	 */
1459  	lockup_detector_cleanup();
1460  	arch_smt_update();
1461  	return ret;
1462  }
1463  
1464  struct cpu_down_work {
1465  	unsigned int		cpu;
1466  	enum cpuhp_state	target;
1467  };
1468  
1469  static long __cpu_down_maps_locked(void *arg)
1470  {
1471  	struct cpu_down_work *work = arg;
1472  
1473  	return _cpu_down(work->cpu, 0, work->target);
1474  }
1475  
1476  static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1477  {
1478  	struct cpu_down_work work = { .cpu = cpu, .target = target, };
1479  
1480  	/*
1481  	 * If the platform does not support hotplug, report it explicitly to
1482  	 * differentiate it from a transient offlining failure.
1483  	 */
1484  	if (cpu_hotplug_offline_disabled)
1485  		return -EOPNOTSUPP;
1486  	if (cpu_hotplug_disabled)
1487  		return -EBUSY;
1488  
1489  	/*
1490  	 * Ensure that the control task does not run on the to be offlined
1491  	 * CPU to prevent a deadlock against cfs_b->period_timer.
1492  	 * Also keep at least one housekeeping cpu onlined to avoid generating
1493  	 * an empty sched_domain span.
1494  	 */
1495  	for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
1496  		if (cpu != work.cpu)
1497  			return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
1498  	}
1499  	return -EBUSY;
1500  }
1501  
1502  static int cpu_down(unsigned int cpu, enum cpuhp_state target)
1503  {
1504  	int err;
1505  
1506  	cpu_maps_update_begin();
1507  	err = cpu_down_maps_locked(cpu, target);
1508  	cpu_maps_update_done();
1509  	return err;
1510  }
1511  
1512  /**
1513   * cpu_device_down - Bring down a cpu device
1514   * @dev: Pointer to the cpu device to offline
1515   *
1516   * This function is meant to be used by device core cpu subsystem only.
1517   *
1518   * Other subsystems should use remove_cpu() instead.
1519   *
1520   * Return: %0 on success or a negative errno code
1521   */
1522  int cpu_device_down(struct device *dev)
1523  {
1524  	return cpu_down(dev->id, CPUHP_OFFLINE);
1525  }
1526  
1527  int remove_cpu(unsigned int cpu)
1528  {
1529  	int ret;
1530  
1531  	lock_device_hotplug();
1532  	ret = device_offline(get_cpu_device(cpu));
1533  	unlock_device_hotplug();
1534  
1535  	return ret;
1536  }
1537  EXPORT_SYMBOL_GPL(remove_cpu);
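
/*
 * A minimal sketch of the intended use from another subsystem (the error
 * handling policy is up to the caller); add_cpu() further down is the
 * counterpart for onlining:
 *
 *	ret = remove_cpu(cpu);
 *	if (ret)
 *		pr_err("Could not offline CPU%u: %d\n", cpu, ret);
 */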
1538  
1539  void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
1540  {
1541  	unsigned int cpu;
1542  	int error;
1543  
1544  	cpu_maps_update_begin();
1545  
1546  	/*
1547  	 * Make certain the cpu I'm about to reboot on is online.
1548  	 * Make sure the CPU we are about to reboot on is online.
1549  	 *
1550  	 * This is in line with what migrate_to_reboot_cpu() already does.
1551  	if (!cpu_online(primary_cpu))
1552  		primary_cpu = cpumask_first(cpu_online_mask);
1553  
1554  	for_each_online_cpu(cpu) {
1555  		if (cpu == primary_cpu)
1556  			continue;
1557  
1558  		error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
1559  		if (error) {
1560  			pr_err("Failed to offline CPU%d - error=%d",
1561  				cpu, error);
1562  			break;
1563  		}
1564  	}
1565  
1566  	/*
1567  	 * Ensure all but the reboot CPU are offline.
1568  	 */
1569  	BUG_ON(num_online_cpus() > 1);
1570  
1571  	/*
1572  	 * Make sure the CPUs won't be enabled by someone else after this
1573  	 * point. Kexec will reboot to a new kernel shortly resetting
1574  	 * everything along the way.
1575  	 */
1576  	cpu_hotplug_disabled++;
1577  
1578  	cpu_maps_update_done();
1579  }
1580  
1581  #else
1582  #define takedown_cpu		NULL
1583  #endif /*CONFIG_HOTPLUG_CPU*/
1584  
1585  /**
1586   * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1587   * @cpu: cpu that just started
1588   *
1589   * It must be called by the arch code on the new cpu, before the new cpu
1590   * enables interrupts and before the "boot" cpu returns from __cpu_up().
1591   */
1592  void notify_cpu_starting(unsigned int cpu)
1593  {
1594  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1595  	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1596  
1597  	rcutree_report_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
1598  	cpumask_set_cpu(cpu, &cpus_booted_once_mask);
1599  
1600  	/*
1601  	 * STARTING must not fail!
1602  	 */
1603  	cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
1604  }
1605  
1606  /*
1607   * Called from the idle task. Wake up the controlling task which brings the
1608   * hotplug thread of the upcoming CPU up and then delegates the rest of the
1609   * online bringup to the hotplug thread.
1610   */
1611  void cpuhp_online_idle(enum cpuhp_state state)
1612  {
1613  	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1614  
1615  	/* Happens for the boot cpu */
1616  	if (state != CPUHP_AP_ONLINE_IDLE)
1617  		return;
1618  
1619  	cpuhp_ap_update_sync_state(SYNC_STATE_ONLINE);
1620  
1621  	/*
1622  	 * Unpark the stopper thread before we start the idle loop (and start
1623  	 * scheduling); this ensures the stopper task is always available.
1624  	 */
1625  	stop_machine_unpark(smp_processor_id());
1626  
1627  	st->state = CPUHP_AP_ONLINE_IDLE;
1628  	complete_ap_thread(st, true);
1629  }
1630  
1631  /* Requires cpu_add_remove_lock to be held */
1632  static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1633  {
1634  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1635  	struct task_struct *idle;
1636  	int ret = 0;
1637  
1638  	cpus_write_lock();
1639  
1640  	if (!cpu_present(cpu)) {
1641  		ret = -EINVAL;
1642  		goto out;
1643  	}
1644  
1645  	/*
1646  	 * The caller of cpu_up() might have raced with another
1647  	 * caller. Nothing to do.
1648  	 */
1649  	if (st->state >= target)
1650  		goto out;
1651  
1652  	if (st->state == CPUHP_OFFLINE) {
1653  		/* Let it fail before we try to bring the cpu up */
1654  		idle = idle_thread_get(cpu);
1655  		if (IS_ERR(idle)) {
1656  			ret = PTR_ERR(idle);
1657  			goto out;
1658  		}
1659  
1660  		/*
1661  		 * Reset stale stack state from the last time this CPU was online.
1662  		 */
1663  		scs_task_reset(idle);
1664  		kasan_unpoison_task_stack(idle);
1665  	}
1666  
1667  	cpuhp_tasks_frozen = tasks_frozen;
1668  
1669  	cpuhp_set_state(cpu, st, target);
1670  	/*
1671  	 * If the current CPU state is in the range of the AP hotplug thread,
1672  	 * then we need to kick the thread once more.
1673  	 */
1674  	if (st->state > CPUHP_BRINGUP_CPU) {
1675  		ret = cpuhp_kick_ap_work(cpu);
1676  		/*
1677  		 * The AP side has done the error rollback already. Just
1678  		 * return the error code.
1679  		 */
1680  		if (ret)
1681  			goto out;
1682  	}
1683  
1684  	/*
1685  	 * Try to reach the target state. We max out on the BP at
1686  	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1687  	 * responsible for bringing it up to the target state.
1688  	 */
1689  	target = min((int)target, CPUHP_BRINGUP_CPU);
1690  	ret = cpuhp_up_callbacks(cpu, st, target);
1691  out:
1692  	cpus_write_unlock();
1693  	arch_smt_update();
1694  	return ret;
1695  }
1696  
1697  static int cpu_up(unsigned int cpu, enum cpuhp_state target)
1698  {
1699  	int err = 0;
1700  
1701  	if (!cpu_possible(cpu)) {
1702  		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1703  		       cpu);
1704  		return -EINVAL;
1705  	}
1706  
1707  	err = try_online_node(cpu_to_node(cpu));
1708  	if (err)
1709  		return err;
1710  
1711  	cpu_maps_update_begin();
1712  
1713  	if (cpu_hotplug_disabled) {
1714  		err = -EBUSY;
1715  		goto out;
1716  	}
1717  	if (!cpu_bootable(cpu)) {
1718  		err = -EPERM;
1719  		goto out;
1720  	}
1721  
1722  	err = _cpu_up(cpu, 0, target);
1723  out:
1724  	cpu_maps_update_done();
1725  	return err;
1726  }
1727  
1728  /**
1729   * cpu_device_up - Bring up a cpu device
1730   * @dev: Pointer to the cpu device to online
1731   *
1732   * This function is meant to be used by device core cpu subsystem only.
1733   *
1734   * Other subsystems should use add_cpu() instead.
1735   *
1736   * Return: %0 on success or a negative errno code
1737   */
1738  int cpu_device_up(struct device *dev)
1739  {
1740  	return cpu_up(dev->id, CPUHP_ONLINE);
1741  }
1742  
1743  int add_cpu(unsigned int cpu)
1744  {
1745  	int ret;
1746  
1747  	lock_device_hotplug();
1748  	ret = device_online(get_cpu_device(cpu));
1749  	unlock_device_hotplug();
1750  
1751  	return ret;
1752  }
1753  EXPORT_SYMBOL_GPL(add_cpu);
1754  
1755  /**
1756   * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
1757   * @sleep_cpu: The cpu we hibernated on and should be brought up.
1758   *
1759   * On some architectures like arm64, we can hibernate on any CPU, but on
1760   * wake up the CPU we hibernated on might be offline as a side effect of
1761   * using maxcpus= for example.
1762   *
1763   * Return: %0 on success or a negative errno code
1764   */
1765  int bringup_hibernate_cpu(unsigned int sleep_cpu)
1766  {
1767  	int ret;
1768  
1769  	if (!cpu_online(sleep_cpu)) {
1770  		pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
1771  		ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
1772  		if (ret) {
1773  			pr_err("Failed to bring hibernate-CPU up!\n");
1774  			return ret;
1775  		}
1776  	}
1777  	return 0;
1778  }
1779  
1780  static void __init cpuhp_bringup_mask(const struct cpumask *mask, unsigned int ncpus,
1781  				      enum cpuhp_state target)
1782  {
1783  	unsigned int cpu;
1784  
1785  	for_each_cpu(cpu, mask) {
1786  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1787  
1788  		if (cpu_up(cpu, target) && can_rollback_cpu(st)) {
1789  			/*
1790  			 * If this failed then cpu_up() might have only
1791  			 * rolled back to CPUHP_BP_KICK_AP for the final
1792  			 * online. Clean it up. NOOP if already rolled back.
1793  			 */
1794  			WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, CPUHP_OFFLINE));
1795  		}
1796  
1797  		if (!--ncpus)
1798  			break;
1799  	}
1800  }
1801  
1802  #ifdef CONFIG_HOTPLUG_PARALLEL
1803  static bool __cpuhp_parallel_bringup __ro_after_init = true;
1804  
1805  static int __init parallel_bringup_parse_param(char *arg)
1806  {
1807  	return kstrtobool(arg, &__cpuhp_parallel_bringup);
1808  }
1809  early_param("cpuhp.parallel", parallel_bringup_parse_param);
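
/*
 * "cpuhp.parallel=0" on the kernel command line (any kstrtobool() false
 * value) disables this path and falls back to the fully serialized bringup
 * in bringup_nonboot_cpus().
 */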
1810  
1811  #ifdef CONFIG_HOTPLUG_SMT
1812  static inline bool cpuhp_smt_aware(void)
1813  {
1814  	return cpu_smt_max_threads > 1;
1815  }
1816  
1817  static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
1818  {
1819  	return cpu_primary_thread_mask;
1820  }
1821  #else
1822  static inline bool cpuhp_smt_aware(void)
1823  {
1824  	return false;
1825  }
1826  static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
1827  {
1828  	return cpu_none_mask;
1829  }
1830  #endif
1831  
1832  bool __weak arch_cpuhp_init_parallel_bringup(void)
1833  {
1834  	return true;
1835  }
1836  
1837  /*
1838   * On architectures which have enabled parallel bringup this invokes all BP
1839   * prepare states for each of the to be onlined APs first. The last state
1840   * sends the startup IPI to the APs. The APs proceed through the low level
1841   * bringup code in parallel and then wait for the control CPU to release
1842   * them one by one for the final onlining procedure.
1843   *
1844   * This avoids waiting for each AP to respond to the startup IPI in
1845   * CPUHP_BRINGUP_CPU.
1846   */
1847  static bool __init cpuhp_bringup_cpus_parallel(unsigned int ncpus)
1848  {
1849  	const struct cpumask *mask = cpu_present_mask;
1850  
1851  	if (__cpuhp_parallel_bringup)
1852  		__cpuhp_parallel_bringup = arch_cpuhp_init_parallel_bringup();
1853  	if (!__cpuhp_parallel_bringup)
1854  		return false;
1855  
1856  	if (cpuhp_smt_aware()) {
1857  		const struct cpumask *pmask = cpuhp_get_primary_thread_mask();
1858  		static struct cpumask tmp_mask __initdata;
1859  
1860  		/*
1861  		 * For various reasons, x86 requires that SMT siblings are not
1862  		 * brought up while the primary thread does a microcode update.
1863  		 * Bring the primary threads up first.
1864  		 */
1865  		cpumask_and(&tmp_mask, mask, pmask);
1866  		cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_BP_KICK_AP);
1867  		cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_ONLINE);
1868  		/* Account for the online CPUs */
1869  		ncpus -= num_online_cpus();
1870  		if (!ncpus)
1871  			return true;
1872  		/* Create the mask for secondary CPUs */
1873  		cpumask_andnot(&tmp_mask, mask, pmask);
1874  		mask = &tmp_mask;
1875  	}
1876  
1877  	/* Bring the not-yet started CPUs up */
1878  	cpuhp_bringup_mask(mask, ncpus, CPUHP_BP_KICK_AP);
1879  	cpuhp_bringup_mask(mask, ncpus, CPUHP_ONLINE);
1880  	return true;
1881  }
1882  #else
1883  static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return false; }
1884  #endif /* CONFIG_HOTPLUG_PARALLEL */
1885  
1886  void __init bringup_nonboot_cpus(unsigned int max_cpus)
1887  {
1888  	if (!max_cpus)
1889  		return;
1890  
1891  	/* Try parallel bringup optimization if enabled */
1892  	if (cpuhp_bringup_cpus_parallel(max_cpus))
1893  		return;
1894  
1895  	/* Full per CPU serialized bringup */
1896  	cpuhp_bringup_mask(cpu_present_mask, max_cpus, CPUHP_ONLINE);
1897  }
1898  
1899  #ifdef CONFIG_PM_SLEEP_SMP
1900  static cpumask_var_t frozen_cpus;
1901  
1902  int freeze_secondary_cpus(int primary)
1903  {
1904  	int cpu, error = 0;
1905  
1906  	cpu_maps_update_begin();
1907  	if (primary == -1) {
1908  		primary = cpumask_first(cpu_online_mask);
1909  		if (!housekeeping_cpu(primary, HK_TYPE_TIMER))
1910  			primary = housekeeping_any_cpu(HK_TYPE_TIMER);
1911  	} else {
1912  		if (!cpu_online(primary))
1913  			primary = cpumask_first(cpu_online_mask);
1914  	}
1915  
1916  	/*
1917  	 * We take down all of the non-boot CPUs in one shot to avoid races
1918  	 * with userspace trying to use CPU hotplug at the same time.
1919  	 */
1920  	cpumask_clear(frozen_cpus);
1921  
1922  	pr_info("Disabling non-boot CPUs ...\n");
1923  	for (cpu = nr_cpu_ids - 1; cpu >= 0; cpu--) {
1924  		if (!cpu_online(cpu) || cpu == primary)
1925  			continue;
1926  
1927  		if (pm_wakeup_pending()) {
1928  			pr_info("Wakeup pending. Abort CPU freeze\n");
1929  			error = -EBUSY;
1930  			break;
1931  		}
1932  
1933  		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1934  		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1935  		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1936  		if (!error)
1937  			cpumask_set_cpu(cpu, frozen_cpus);
1938  		else {
1939  			pr_err("Error taking CPU%d down: %d\n", cpu, error);
1940  			break;
1941  		}
1942  	}
1943  
1944  	if (!error)
1945  		BUG_ON(num_online_cpus() > 1);
1946  	else
1947  		pr_err("Non-boot CPUs are not disabled\n");
1948  
1949  	/*
1950  	 * Make sure the CPUs won't be enabled by someone else. We need to do
1951  	 * this even in case of failure as all freeze_secondary_cpus() users are
1952  	 * supposed to do thaw_secondary_cpus() on the failure path.
1953  	 */
1954  	cpu_hotplug_disabled++;
1955  
1956  	cpu_maps_update_done();
1957  	return error;
1958  }
1959  
1960  void __weak arch_thaw_secondary_cpus_begin(void)
1961  {
1962  }
1963  
1964  void __weak arch_thaw_secondary_cpus_end(void)
1965  {
1966  }
1967  
1968  void thaw_secondary_cpus(void)
1969  {
1970  	int cpu, error;
1971  
1972  	/* Allow everyone to use the CPU hotplug again */
1973  	cpu_maps_update_begin();
1974  	__cpu_hotplug_enable();
1975  	if (cpumask_empty(frozen_cpus))
1976  		goto out;
1977  
1978  	pr_info("Enabling non-boot CPUs ...\n");
1979  
1980  	arch_thaw_secondary_cpus_begin();
1981  
1982  	for_each_cpu(cpu, frozen_cpus) {
1983  		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1984  		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1985  		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1986  		if (!error) {
1987  			pr_info("CPU%d is up\n", cpu);
1988  			continue;
1989  		}
1990  		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1991  	}
1992  
1993  	arch_thaw_secondary_cpus_end();
1994  
1995  	cpumask_clear(frozen_cpus);
1996  out:
1997  	cpu_maps_update_done();
1998  }
1999  
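/*
 * Illustrative example (not part of this file): the suspend/hibernate code
 * pairs the two calls above, conceptually like this (the real callers are the
 * suspend_disable/enable_secondary_cpus() wrappers):
 *
 *	error = freeze_secondary_cpus(-1);	// -1: pick a housekeeping CPU
 *	if (!error) {
 *		// ... only one CPU left online, enter the sleep state ...
 *	}
 *	thaw_secondary_cpus();			// required even on failure
 */
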
2000  static int __init alloc_frozen_cpus(void)
2001  {
2002  	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
2003  		return -ENOMEM;
2004  	return 0;
2005  }
2006  core_initcall(alloc_frozen_cpus);
2007  
2008  /*
2009   * When callbacks for CPU hotplug notifications are being executed, we must
2010   * ensure that the state of the system with respect to the tasks being frozen
2011   * or not, as reported by the notification, remains unchanged *throughout the
2012   * duration* of the execution of the callbacks.
2013   * Hence we need to prevent the freezer from racing with regular CPU hotplug.
2014   *
2015   * This synchronization is implemented by mutually excluding regular CPU
2016   * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
2017   * Hibernate notifications.
2018   */
2019  static int
2020  cpu_hotplug_pm_callback(struct notifier_block *nb,
2021  			unsigned long action, void *ptr)
2022  {
2023  	switch (action) {
2024  
2025  	case PM_SUSPEND_PREPARE:
2026  	case PM_HIBERNATION_PREPARE:
2027  		cpu_hotplug_disable();
2028  		break;
2029  
2030  	case PM_POST_SUSPEND:
2031  	case PM_POST_HIBERNATION:
2032  		cpu_hotplug_enable();
2033  		break;
2034  
2035  	default:
2036  		return NOTIFY_DONE;
2037  	}
2038  
2039  	return NOTIFY_OK;
2040  }
2041  
2042  
2043  static int __init cpu_hotplug_pm_sync_init(void)
2044  {
2045  	/*
2046  	 * cpu_hotplug_pm_callback has higher priority than the x86
2047  	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
2048  	 * disabling cpu hotplug first to avoid hotplug races.
2049  	 */
2050  	pm_notifier(cpu_hotplug_pm_callback, 0);
2051  	return 0;
2052  }
2053  core_initcall(cpu_hotplug_pm_sync_init);
2054  
2055  #endif /* CONFIG_PM_SLEEP_SMP */
2056  
2057  int __boot_cpu_id;
2058  
2059  #endif /* CONFIG_SMP */
2060  
2061  /* Boot processor state steps */
2062  static struct cpuhp_step cpuhp_hp_states[] = {
2063  	[CPUHP_OFFLINE] = {
2064  		.name			= "offline",
2065  		.startup.single		= NULL,
2066  		.teardown.single	= NULL,
2067  	},
2068  #ifdef CONFIG_SMP
2069  	[CPUHP_CREATE_THREADS] = {
2070  		.name			= "threads:prepare",
2071  		.startup.single		= smpboot_create_threads,
2072  		.teardown.single	= NULL,
2073  		.cant_stop		= true,
2074  	},
2075  	[CPUHP_PERF_PREPARE] = {
2076  		.name			= "perf:prepare",
2077  		.startup.single		= perf_event_init_cpu,
2078  		.teardown.single	= perf_event_exit_cpu,
2079  	},
2080  	[CPUHP_RANDOM_PREPARE] = {
2081  		.name			= "random:prepare",
2082  		.startup.single		= random_prepare_cpu,
2083  		.teardown.single	= NULL,
2084  	},
2085  	[CPUHP_WORKQUEUE_PREP] = {
2086  		.name			= "workqueue:prepare",
2087  		.startup.single		= workqueue_prepare_cpu,
2088  		.teardown.single	= NULL,
2089  	},
2090  	[CPUHP_HRTIMERS_PREPARE] = {
2091  		.name			= "hrtimers:prepare",
2092  		.startup.single		= hrtimers_prepare_cpu,
2093  		.teardown.single	= NULL,
2094  	},
2095  	[CPUHP_SMPCFD_PREPARE] = {
2096  		.name			= "smpcfd:prepare",
2097  		.startup.single		= smpcfd_prepare_cpu,
2098  		.teardown.single	= smpcfd_dead_cpu,
2099  	},
2100  	[CPUHP_RELAY_PREPARE] = {
2101  		.name			= "relay:prepare",
2102  		.startup.single		= relay_prepare_cpu,
2103  		.teardown.single	= NULL,
2104  	},
2105  	[CPUHP_RCUTREE_PREP] = {
2106  		.name			= "RCU/tree:prepare",
2107  		.startup.single		= rcutree_prepare_cpu,
2108  		.teardown.single	= rcutree_dead_cpu,
2109  	},
2110  	/*
2111  	 * On the tear-down path, timers_dead_cpu() must be invoked
2112  	 * before blk_mq_queue_reinit_notify() from notify_dead(),
2113  	 * otherwise an RCU stall occurs.
2114  	 */
2115  	[CPUHP_TIMERS_PREPARE] = {
2116  		.name			= "timers:prepare",
2117  		.startup.single		= timers_prepare_cpu,
2118  		.teardown.single	= timers_dead_cpu,
2119  	},
2120  
2121  #ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
2122  	/*
2123  	 * Kicks the AP alive. AP will wait in cpuhp_ap_sync_alive() until
2124  	 * the next step releases it.
2125  	 */
2126  	[CPUHP_BP_KICK_AP] = {
2127  		.name			= "cpu:kick_ap",
2128  		.startup.single		= cpuhp_kick_ap_alive,
2129  	},
2130  
2131  	/*
2132  	 * Waits for the AP to reach cpuhp_ap_sync_alive() and then
2133  	 * releases it for the complete bringup.
2134  	 */
2135  	[CPUHP_BRINGUP_CPU] = {
2136  		.name			= "cpu:bringup",
2137  		.startup.single		= cpuhp_bringup_ap,
2138  		.teardown.single	= finish_cpu,
2139  		.cant_stop		= true,
2140  	},
2141  #else
2142  	/*
2143  	 * All-in-one CPU bringup state which includes the kick alive.
2144  	 */
2145  	[CPUHP_BRINGUP_CPU] = {
2146  		.name			= "cpu:bringup",
2147  		.startup.single		= bringup_cpu,
2148  		.teardown.single	= finish_cpu,
2149  		.cant_stop		= true,
2150  	},
2151  #endif
2152  	/* Final state before CPU kills itself */
2153  	[CPUHP_AP_IDLE_DEAD] = {
2154  		.name			= "idle:dead",
2155  	},
2156  	/*
2157  	 * Last state before CPU enters the idle loop to die. Transient state
2158  	 * for synchronization.
2159  	 */
2160  	[CPUHP_AP_OFFLINE] = {
2161  		.name			= "ap:offline",
2162  		.cant_stop		= true,
2163  	},
2164  	/* First state is scheduler control. Interrupts are disabled */
2165  	[CPUHP_AP_SCHED_STARTING] = {
2166  		.name			= "sched:starting",
2167  		.startup.single		= sched_cpu_starting,
2168  		.teardown.single	= sched_cpu_dying,
2169  	},
2170  	[CPUHP_AP_RCUTREE_DYING] = {
2171  		.name			= "RCU/tree:dying",
2172  		.startup.single		= NULL,
2173  		.teardown.single	= rcutree_dying_cpu,
2174  	},
2175  	[CPUHP_AP_SMPCFD_DYING] = {
2176  		.name			= "smpcfd:dying",
2177  		.startup.single		= NULL,
2178  		.teardown.single	= smpcfd_dying_cpu,
2179  	},
2180  	[CPUHP_AP_HRTIMERS_DYING] = {
2181  		.name			= "hrtimers:dying",
2182  		.startup.single		= NULL,
2183  		.teardown.single	= hrtimers_cpu_dying,
2184  	},
2185  	[CPUHP_AP_TICK_DYING] = {
2186  		.name			= "tick:dying",
2187  		.startup.single		= NULL,
2188  		.teardown.single	= tick_cpu_dying,
2189  	},
2190  	/* Entry state on starting. Interrupts enabled from here on. Transient
2191  	 * state for synchronization. */
2192  	[CPUHP_AP_ONLINE] = {
2193  		.name			= "ap:online",
2194  	},
2195  	/*
2196  	 * Handled on control processor until the plugged processor manages
2197  	 * this itself.
2198  	 */
2199  	[CPUHP_TEARDOWN_CPU] = {
2200  		.name			= "cpu:teardown",
2201  		.startup.single		= NULL,
2202  		.teardown.single	= takedown_cpu,
2203  		.cant_stop		= true,
2204  	},
2205  
2206  	[CPUHP_AP_SCHED_WAIT_EMPTY] = {
2207  		.name			= "sched:waitempty",
2208  		.startup.single		= NULL,
2209  		.teardown.single	= sched_cpu_wait_empty,
2210  	},
2211  
2212  	/* Handle smpboot threads park/unpark */
2213  	[CPUHP_AP_SMPBOOT_THREADS] = {
2214  		.name			= "smpboot/threads:online",
2215  		.startup.single		= smpboot_unpark_threads,
2216  		.teardown.single	= smpboot_park_threads,
2217  	},
2218  	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
2219  		.name			= "irq/affinity:online",
2220  		.startup.single		= irq_affinity_online_cpu,
2221  		.teardown.single	= NULL,
2222  	},
2223  	[CPUHP_AP_PERF_ONLINE] = {
2224  		.name			= "perf:online",
2225  		.startup.single		= perf_event_init_cpu,
2226  		.teardown.single	= perf_event_exit_cpu,
2227  	},
2228  	[CPUHP_AP_WATCHDOG_ONLINE] = {
2229  		.name			= "lockup_detector:online",
2230  		.startup.single		= lockup_detector_online_cpu,
2231  		.teardown.single	= lockup_detector_offline_cpu,
2232  	},
2233  	[CPUHP_AP_WORKQUEUE_ONLINE] = {
2234  		.name			= "workqueue:online",
2235  		.startup.single		= workqueue_online_cpu,
2236  		.teardown.single	= workqueue_offline_cpu,
2237  	},
2238  	[CPUHP_AP_RANDOM_ONLINE] = {
2239  		.name			= "random:online",
2240  		.startup.single		= random_online_cpu,
2241  		.teardown.single	= NULL,
2242  	},
2243  	[CPUHP_AP_RCUTREE_ONLINE] = {
2244  		.name			= "RCU/tree:online",
2245  		.startup.single		= rcutree_online_cpu,
2246  		.teardown.single	= rcutree_offline_cpu,
2247  	},
2248  #endif
2249  	/*
2250  	 * The dynamically registered state space is here
2251  	 */
2252  
2253  #ifdef CONFIG_SMP
2254  	/* Last state is scheduler control setting the cpu active */
2255  	[CPUHP_AP_ACTIVE] = {
2256  		.name			= "sched:active",
2257  		.startup.single		= sched_cpu_activate,
2258  		.teardown.single	= sched_cpu_deactivate,
2259  	},
2260  #endif
2261  
2262  	/* CPU is fully up and running. */
2263  	[CPUHP_ONLINE] = {
2264  		.name			= "online",
2265  		.startup.single		= NULL,
2266  		.teardown.single	= NULL,
2267  	},
2268  };
2269  
2270  /* Sanity check for callbacks */
2271  static int cpuhp_cb_check(enum cpuhp_state state)
2272  {
2273  	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
2274  		return -EINVAL;
2275  	return 0;
2276  }
2277  
2278  /*
2279   * Returns a free slot in the dynamic state range (CPUHP_AP_ONLINE_DYN or
2280   * CPUHP_BP_PREPARE_DYN). The slots are protected by the cpuhp_state_mutex and
2281   * an empty slot is identified by having no name assigned.
2282   */
2283  static int cpuhp_reserve_state(enum cpuhp_state state)
2284  {
2285  	enum cpuhp_state i, end;
2286  	struct cpuhp_step *step;
2287  
2288  	switch (state) {
2289  	case CPUHP_AP_ONLINE_DYN:
2290  		step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
2291  		end = CPUHP_AP_ONLINE_DYN_END;
2292  		break;
2293  	case CPUHP_BP_PREPARE_DYN:
2294  		step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
2295  		end = CPUHP_BP_PREPARE_DYN_END;
2296  		break;
2297  	default:
2298  		return -EINVAL;
2299  	}
2300  
2301  	for (i = state; i <= end; i++, step++) {
2302  		if (!step->name)
2303  			return i;
2304  	}
2305  	WARN(1, "No more dynamic states available for CPU hotplug\n");
2306  	return -ENOSPC;
2307  }
2308  
2309  static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
2310  				 int (*startup)(unsigned int cpu),
2311  				 int (*teardown)(unsigned int cpu),
2312  				 bool multi_instance)
2313  {
2314  	/* (Un)Install the callbacks for further cpu hotplug operations */
2315  	struct cpuhp_step *sp;
2316  	int ret = 0;
2317  
2318  	/*
2319  	 * If name is NULL, then the state gets removed.
2320  	 *
2321  	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
2322  	 * the first allocation from these dynamic ranges, so the removal
2323  	 * would trigger a new allocation and clear the wrong (already
2324  	 * empty) state, leaving the callbacks of the to-be-cleared state
2325  	 * dangling, which causes wreckage on the next hotplug operation.
2326  	 */
2327  	if (name && (state == CPUHP_AP_ONLINE_DYN ||
2328  		     state == CPUHP_BP_PREPARE_DYN)) {
2329  		ret = cpuhp_reserve_state(state);
2330  		if (ret < 0)
2331  			return ret;
2332  		state = ret;
2333  	}
2334  	sp = cpuhp_get_step(state);
2335  	if (name && sp->name)
2336  		return -EBUSY;
2337  
2338  	sp->startup.single = startup;
2339  	sp->teardown.single = teardown;
2340  	sp->name = name;
2341  	sp->multi_instance = multi_instance;
2342  	INIT_HLIST_HEAD(&sp->list);
2343  	return ret;
2344  }
2345  
2346  static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
2347  {
2348  	return cpuhp_get_step(state)->teardown.single;
2349  }
2350  
2351  /*
2352   * Call the startup/teardown function for a step either on the AP or
2353   * on the current CPU.
2354   */
2355  static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
2356  			    struct hlist_node *node)
2357  {
2358  	struct cpuhp_step *sp = cpuhp_get_step(state);
2359  	int ret;
2360  
2361  	/*
2362  	 * If there's nothing to do, we're done.
2363  	 * Relies on the union for multi_instance.
2364  	 */
2365  	if (cpuhp_step_empty(bringup, sp))
2366  		return 0;
2367  	/*
2368  	 * The non-AP-bound callbacks can fail on bringup. On teardown,
2369  	 * e.g. module removal, we crash for now.
2370  	 */
2371  #ifdef CONFIG_SMP
2372  	if (cpuhp_is_ap_state(state))
2373  		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
2374  	else
2375  		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
2376  #else
2377  	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
2378  #endif
2379  	BUG_ON(ret && !bringup);
2380  	return ret;
2381  }
2382  
2383  /*
2384   * Called from __cpuhp_setup_state on a recoverable failure.
2385   *
2386   * Note: The teardown callbacks for rollback are not allowed to fail!
2387   */
2388  static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
2389  				   struct hlist_node *node)
2390  {
2391  	int cpu;
2392  
2393  	/* Roll back the already executed steps on the other cpus */
2394  	for_each_present_cpu(cpu) {
2395  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2396  		int cpustate = st->state;
2397  
2398  		if (cpu >= failedcpu)
2399  			break;
2400  
2401  		/* Did we invoke the startup call on that cpu ? */
2402  		if (cpustate >= state)
2403  			cpuhp_issue_call(cpu, state, false, node);
2404  	}
2405  }
2406  
2407  int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
2408  					  struct hlist_node *node,
2409  					  bool invoke)
2410  {
2411  	struct cpuhp_step *sp;
2412  	int cpu;
2413  	int ret;
2414  
2415  	lockdep_assert_cpus_held();
2416  
2417  	sp = cpuhp_get_step(state);
2418  	if (sp->multi_instance == false)
2419  		return -EINVAL;
2420  
2421  	mutex_lock(&cpuhp_state_mutex);
2422  
2423  	if (!invoke || !sp->startup.multi)
2424  		goto add_node;
2425  
2426  	/*
2427  	 * Try to call the startup callback for each present cpu
2428  	 * depending on the hotplug state of the cpu.
2429  	 */
2430  	for_each_present_cpu(cpu) {
2431  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2432  		int cpustate = st->state;
2433  
2434  		if (cpustate < state)
2435  			continue;
2436  
2437  		ret = cpuhp_issue_call(cpu, state, true, node);
2438  		if (ret) {
2439  			if (sp->teardown.multi)
2440  				cpuhp_rollback_install(cpu, state, node);
2441  			goto unlock;
2442  		}
2443  	}
2444  add_node:
2445  	ret = 0;
2446  	hlist_add_head(node, &sp->list);
2447  unlock:
2448  	mutex_unlock(&cpuhp_state_mutex);
2449  	return ret;
2450  }
2451  
2452  int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
2453  			       bool invoke)
2454  {
2455  	int ret;
2456  
2457  	cpus_read_lock();
2458  	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
2459  	cpus_read_unlock();
2460  	return ret;
2461  }
2462  EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
2463  
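/*
 * Illustrative example (not part of this file): a multi-instance state is set
 * up once with cpuhp_setup_state_multi() and instances are attached with
 * cpuhp_state_add_instance(). All "my_*" names are hypothetical placeholders:
 *
 *	struct my_ctx {
 *		struct hlist_node node;
 *		// per-instance data
 *	};
 *
 *	static int my_inst_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct my_ctx *ctx = hlist_entry(node, struct my_ctx, node);
 *
 *		// prepare ctx for @cpu
 *		return 0;
 *	}
 *
 *	// once, e.g. at driver init (error handling elided):
 *	static int my_state;
 *	my_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 *					   "mydrv:online", my_inst_online, NULL);
 *
 *	// per instance:
 *	cpuhp_state_add_instance(my_state, &ctx->node);
 */
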
2464  /**
2465   * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
2466   * @state:		The state to setup
2467   * @name:		Name of the step
2468   * @invoke:		If true, the startup function is invoked for cpus where
2469   *			cpu state >= @state
2470   * @startup:		startup callback function
2471   * @teardown:		teardown callback function
2472   * @multi_instance:	State is set up for multiple instances which get
2473   *			added afterwards.
2474   *
2475   * The caller needs to hold cpus read locked while calling this function.
2476   * Return:
2477   *   On success:
2478   *      Positive state number if @state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN;
2479   *      0 for all other states
2480   *   On failure: proper (negative) error code
2481   */
2482  int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
2483  				   const char *name, bool invoke,
2484  				   int (*startup)(unsigned int cpu),
2485  				   int (*teardown)(unsigned int cpu),
2486  				   bool multi_instance)
2487  {
2488  	int cpu, ret = 0;
2489  	bool dynstate;
2490  
2491  	lockdep_assert_cpus_held();
2492  
2493  	if (cpuhp_cb_check(state) || !name)
2494  		return -EINVAL;
2495  
2496  	mutex_lock(&cpuhp_state_mutex);
2497  
2498  	ret = cpuhp_store_callbacks(state, name, startup, teardown,
2499  				    multi_instance);
2500  
2501  	dynstate = state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN;
2502  	if (ret > 0 && dynstate) {
2503  		state = ret;
2504  		ret = 0;
2505  	}
2506  
2507  	if (ret || !invoke || !startup)
2508  		goto out;
2509  
2510  	/*
2511  	 * Try to call the startup callback for each present cpu
2512  	 * depending on the hotplug state of the cpu.
2513  	 */
2514  	for_each_present_cpu(cpu) {
2515  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2516  		int cpustate = st->state;
2517  
2518  		if (cpustate < state)
2519  			continue;
2520  
2521  		ret = cpuhp_issue_call(cpu, state, true, NULL);
2522  		if (ret) {
2523  			if (teardown)
2524  				cpuhp_rollback_install(cpu, state, NULL);
2525  			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2526  			goto out;
2527  		}
2528  	}
2529  out:
2530  	mutex_unlock(&cpuhp_state_mutex);
2531  	/*
2532  	 * If the requested state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN,
2533  	 * return the dynamically allocated state in case of success.
2534  	 */
2535  	if (!ret && dynstate)
2536  		return state;
2537  	return ret;
2538  }
2539  EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
2540  
2541  int __cpuhp_setup_state(enum cpuhp_state state,
2542  			const char *name, bool invoke,
2543  			int (*startup)(unsigned int cpu),
2544  			int (*teardown)(unsigned int cpu),
2545  			bool multi_instance)
2546  {
2547  	int ret;
2548  
2549  	cpus_read_lock();
2550  	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
2551  					     teardown, multi_instance);
2552  	cpus_read_unlock();
2553  	return ret;
2554  }
2555  EXPORT_SYMBOL(__cpuhp_setup_state);
2556  
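/*
 * Illustrative example (not part of this file): most users go through the
 * cpuhp_setup_state() wrapper from <linux/cpuhotplug.h>, which takes the cpus
 * read lock and lands in __cpuhp_setup_state() above. All "my_*" names are
 * hypothetical placeholders:
 *
 *	static int my_online(unsigned int cpu)
 *	{
 *		// set up per-cpu resources for @cpu
 *		return 0;
 *	}
 *
 *	static int my_offline(unsigned int cpu)
 *	{
 *		// tear down per-cpu resources for @cpu
 *		return 0;
 *	}
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
 *				my_online, my_offline);
 *	if (ret < 0)
 *		return ret;
 *	my_hp_state = ret;	// dynamically allocated state number
 */
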
2557  int __cpuhp_state_remove_instance(enum cpuhp_state state,
2558  				  struct hlist_node *node, bool invoke)
2559  {
2560  	struct cpuhp_step *sp = cpuhp_get_step(state);
2561  	int cpu;
2562  
2563  	BUG_ON(cpuhp_cb_check(state));
2564  
2565  	if (!sp->multi_instance)
2566  		return -EINVAL;
2567  
2568  	cpus_read_lock();
2569  	mutex_lock(&cpuhp_state_mutex);
2570  
2571  	if (!invoke || !cpuhp_get_teardown_cb(state))
2572  		goto remove;
2573  	/*
2574  	 * Call the teardown callback for each present cpu depending
2575  	 * on the hotplug state of the cpu. This function is not
2576  	 * allowed to fail currently!
2577  	 */
2578  	for_each_present_cpu(cpu) {
2579  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2580  		int cpustate = st->state;
2581  
2582  		if (cpustate >= state)
2583  			cpuhp_issue_call(cpu, state, false, node);
2584  	}
2585  
2586  remove:
2587  	hlist_del(node);
2588  	mutex_unlock(&cpuhp_state_mutex);
2589  	cpus_read_unlock();
2590  
2591  	return 0;
2592  }
2593  EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
2594  
2595  /**
2596   * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
2597   * @state:	The state to remove
2598   * @invoke:	If true, the teardown function is invoked for cpus where
2599   *		cpu state >= @state
2600   *
2601   * The caller needs to hold cpus read locked while calling this function.
2602   * The teardown callback is currently not allowed to fail. Think
2603   * about module removal!
2604   */
2605  void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
2606  {
2607  	struct cpuhp_step *sp = cpuhp_get_step(state);
2608  	int cpu;
2609  
2610  	BUG_ON(cpuhp_cb_check(state));
2611  
2612  	lockdep_assert_cpus_held();
2613  
2614  	mutex_lock(&cpuhp_state_mutex);
2615  	if (sp->multi_instance) {
2616  		WARN(!hlist_empty(&sp->list),
2617  		     "Error: Removing state %d which has instances left.\n",
2618  		     state);
2619  		goto remove;
2620  	}
2621  
2622  	if (!invoke || !cpuhp_get_teardown_cb(state))
2623  		goto remove;
2624  
2625  	/*
2626  	 * Call the teardown callback for each present cpu depending
2627  	 * on the hotplug state of the cpu. This function is not
2628  	 * allowed to fail currently!
2629  	 */
2630  	for_each_present_cpu(cpu) {
2631  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2632  		int cpustate = st->state;
2633  
2634  		if (cpustate >= state)
2635  			cpuhp_issue_call(cpu, state, false, NULL);
2636  	}
2637  remove:
2638  	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2639  	mutex_unlock(&cpuhp_state_mutex);
2640  }
2641  EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
2642  
2643  void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2644  {
2645  	cpus_read_lock();
2646  	__cpuhp_remove_state_cpuslocked(state, invoke);
2647  	cpus_read_unlock();
2648  }
2649  EXPORT_SYMBOL(__cpuhp_remove_state);
2650  
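/*
 * Illustrative example (not part of this file): a dynamically allocated state
 * is removed again, e.g. on module exit, via the wrappers from
 * <linux/cpuhotplug.h>; my_hp_state stands for the hypothetical value returned
 * by the earlier setup call:
 *
 *	cpuhp_remove_state(my_hp_state);	// invokes teardown on online CPUs
 *	// or, without invoking the teardown callbacks:
 *	cpuhp_remove_state_nocalls(my_hp_state);
 */
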
2651  #ifdef CONFIG_HOTPLUG_SMT
2652  static void cpuhp_offline_cpu_device(unsigned int cpu)
2653  {
2654  	struct device *dev = get_cpu_device(cpu);
2655  
2656  	dev->offline = true;
2657  	/* Tell user space about the state change */
2658  	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2659  }
2660  
2661  static void cpuhp_online_cpu_device(unsigned int cpu)
2662  {
2663  	struct device *dev = get_cpu_device(cpu);
2664  
2665  	dev->offline = false;
2666  	/* Tell user space about the state change */
2667  	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2668  }
2669  
2670  int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2671  {
2672  	int cpu, ret = 0;
2673  
2674  	cpu_maps_update_begin();
2675  	for_each_online_cpu(cpu) {
2676  		if (topology_is_primary_thread(cpu))
2677  			continue;
2678  		/*
2679  		 * Disable can be called with CPU_SMT_ENABLED when changing
2680  		 * from a higher to lower number of SMT threads per core.
2681  		 */
2682  		if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
2683  			continue;
2684  		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2685  		if (ret)
2686  			break;
2687  		/*
2688  		 * As this needs to hold the cpu maps lock it's impossible
2689  		 * to call device_offline() because that ends up calling
2690  		 * cpu_down() which takes the cpu maps lock. The cpu maps lock
2691  		 * needs to be held as this might race against in-kernel
2692  		 * abusers of the hotplug machinery (thermal management).
2693  		 *
2694  		 * So nothing would update device:offline state. That would
2695  		 * leave the sysfs entry stale and prevent onlining after
2696  		 * smt control has been changed to 'off' again. This is
2697  		 * called under the sysfs hotplug lock, so it is properly
2698  		 * serialized against the regular offline usage.
2699  		 */
2700  		cpuhp_offline_cpu_device(cpu);
2701  	}
2702  	if (!ret)
2703  		cpu_smt_control = ctrlval;
2704  	cpu_maps_update_done();
2705  	return ret;
2706  }
2707  
2708  /* Check if the core a CPU belongs to is online */
2709  #if !defined(topology_is_core_online)
2710  static inline bool topology_is_core_online(unsigned int cpu)
2711  {
2712  	return true;
2713  }
2714  #endif
2715  
2716  int cpuhp_smt_enable(void)
2717  {
2718  	int cpu, ret = 0;
2719  
2720  	cpu_maps_update_begin();
2721  	cpu_smt_control = CPU_SMT_ENABLED;
2722  	for_each_present_cpu(cpu) {
2723  		/* Skip online CPUs and CPUs on offline nodes */
2724  		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2725  			continue;
2726  		if (!cpu_smt_thread_allowed(cpu) || !topology_is_core_online(cpu))
2727  			continue;
2728  		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2729  		if (ret)
2730  			break;
2731  		/* See comment in cpuhp_smt_disable() */
2732  		cpuhp_online_cpu_device(cpu);
2733  	}
2734  	cpu_maps_update_done();
2735  	return ret;
2736  }
2737  #endif
2738  
2739  #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
2740  static ssize_t state_show(struct device *dev,
2741  			  struct device_attribute *attr, char *buf)
2742  {
2743  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2744  
2745  	return sprintf(buf, "%d\n", st->state);
2746  }
2747  static DEVICE_ATTR_RO(state);
2748  
2749  static ssize_t target_store(struct device *dev, struct device_attribute *attr,
2750  			    const char *buf, size_t count)
2751  {
2752  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2753  	struct cpuhp_step *sp;
2754  	int target, ret;
2755  
2756  	ret = kstrtoint(buf, 10, &target);
2757  	if (ret)
2758  		return ret;
2759  
2760  #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2761  	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2762  		return -EINVAL;
2763  #else
2764  	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2765  		return -EINVAL;
2766  #endif
2767  
2768  	ret = lock_device_hotplug_sysfs();
2769  	if (ret)
2770  		return ret;
2771  
2772  	mutex_lock(&cpuhp_state_mutex);
2773  	sp = cpuhp_get_step(target);
2774  	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2775  	mutex_unlock(&cpuhp_state_mutex);
2776  	if (ret)
2777  		goto out;
2778  
2779  	if (st->state < target)
2780  		ret = cpu_up(dev->id, target);
2781  	else if (st->state > target)
2782  		ret = cpu_down(dev->id, target);
2783  	else if (WARN_ON(st->target != target))
2784  		st->target = target;
2785  out:
2786  	unlock_device_hotplug();
2787  	return ret ? ret : count;
2788  }
2789  
2790  static ssize_t target_show(struct device *dev,
2791  			   struct device_attribute *attr, char *buf)
2792  {
2793  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2794  
2795  	return sprintf(buf, "%d\n", st->target);
2796  }
2797  static DEVICE_ATTR_RW(target);
2798  
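/*
 * Illustrative usage note (not part of this file): the "state" and "target"
 * files above appear under /sys/devices/system/cpu/cpuN/hotplug/. Writing
 * CPUHP_OFFLINE (0) or the CPUHP_ONLINE number to "target" offlines or
 * onlines the CPU, e.g.
 *
 *	# cat /sys/devices/system/cpu/cpu1/hotplug/state
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 *
 * Intermediate targets are only accepted with CONFIG_CPU_HOTPLUG_STATE_CONTROL.
 */
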
2799  static ssize_t fail_store(struct device *dev, struct device_attribute *attr,
2800  			  const char *buf, size_t count)
2801  {
2802  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2803  	struct cpuhp_step *sp;
2804  	int fail, ret;
2805  
2806  	ret = kstrtoint(buf, 10, &fail);
2807  	if (ret)
2808  		return ret;
2809  
2810  	if (fail == CPUHP_INVALID) {
2811  		st->fail = fail;
2812  		return count;
2813  	}
2814  
2815  	if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
2816  		return -EINVAL;
2817  
2818  	/*
2819  	 * Cannot fail STARTING/DYING callbacks.
2820  	 */
2821  	if (cpuhp_is_atomic_state(fail))
2822  		return -EINVAL;
2823  
2824  	/*
2825  	 * DEAD callbacks cannot fail...
2826  	 * ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter
2827  	 * triggering STARTING callbacks, a failure in this state would
2828  	 * triggers the STARTING callbacks, so a failure in this state would
2829  	 */
2830  	if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)
2831  		return -EINVAL;
2832  
2833  	/*
2834  	 * Cannot fail anything that doesn't have callbacks.
2835  	 */
2836  	mutex_lock(&cpuhp_state_mutex);
2837  	sp = cpuhp_get_step(fail);
2838  	if (!sp->startup.single && !sp->teardown.single)
2839  		ret = -EINVAL;
2840  	mutex_unlock(&cpuhp_state_mutex);
2841  	if (ret)
2842  		return ret;
2843  
2844  	st->fail = fail;
2845  
2846  	return count;
2847  }
2848  
2849  static ssize_t fail_show(struct device *dev,
2850  			 struct device_attribute *attr, char *buf)
2851  {
2852  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2853  
2854  	return sprintf(buf, "%d\n", st->fail);
2855  }
2856  
2857  static DEVICE_ATTR_RW(fail);
2858  
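/*
 * Illustrative usage note (not part of this file): "fail" injects a callback
 * failure at the given state so the rollback paths can be exercised, e.g.
 *
 *	# echo <state-number> > /sys/devices/system/cpu/cpu1/hotplug/fail
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 *
 * Writing CPUHP_INVALID (-1) clears the injection again; valid state numbers
 * can be looked up in the global "states" file (see below).
 */
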
2859  static struct attribute *cpuhp_cpu_attrs[] = {
2860  	&dev_attr_state.attr,
2861  	&dev_attr_target.attr,
2862  	&dev_attr_fail.attr,
2863  	NULL
2864  };
2865  
2866  static const struct attribute_group cpuhp_cpu_attr_group = {
2867  	.attrs = cpuhp_cpu_attrs,
2868  	.name = "hotplug",
2869  };
2870  
2871  static ssize_t states_show(struct device *dev,
2872  				 struct device_attribute *attr, char *buf)
2873  {
2874  	ssize_t cur, res = 0;
2875  	int i;
2876  
2877  	mutex_lock(&cpuhp_state_mutex);
2878  	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2879  		struct cpuhp_step *sp = cpuhp_get_step(i);
2880  
2881  		if (sp->name) {
2882  			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2883  			buf += cur;
2884  			res += cur;
2885  		}
2886  	}
2887  	mutex_unlock(&cpuhp_state_mutex);
2888  	return res;
2889  }
2890  static DEVICE_ATTR_RO(states);
2891  
2892  static struct attribute *cpuhp_cpu_root_attrs[] = {
2893  	&dev_attr_states.attr,
2894  	NULL
2895  };
2896  
2897  static const struct attribute_group cpuhp_cpu_root_attr_group = {
2898  	.attrs = cpuhp_cpu_root_attrs,
2899  	.name = "hotplug",
2900  };
2901  
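/*
 * Illustrative usage note (not part of this file): the root group above is
 * created once for the cpu subsystem, so the registered states and their
 * numbers can be listed with
 *
 *	# cat /sys/devices/system/cpu/hotplug/states
 */
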
2902  #ifdef CONFIG_HOTPLUG_SMT
2903  
2904  static bool cpu_smt_num_threads_valid(unsigned int threads)
2905  {
2906  	if (IS_ENABLED(CONFIG_SMT_NUM_THREADS_DYNAMIC))
2907  		return threads >= 1 && threads <= cpu_smt_max_threads;
2908  	return threads == 1 || threads == cpu_smt_max_threads;
2909  }
2910  
2911  static ssize_t
2912  __store_smt_control(struct device *dev, struct device_attribute *attr,
2913  		    const char *buf, size_t count)
2914  {
2915  	int ctrlval, ret, num_threads, orig_threads;
2916  	bool force_off;
2917  
2918  	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2919  		return -EPERM;
2920  
2921  	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2922  		return -ENODEV;
2923  
2924  	if (sysfs_streq(buf, "on")) {
2925  		ctrlval = CPU_SMT_ENABLED;
2926  		num_threads = cpu_smt_max_threads;
2927  	} else if (sysfs_streq(buf, "off")) {
2928  		ctrlval = CPU_SMT_DISABLED;
2929  		num_threads = 1;
2930  	} else if (sysfs_streq(buf, "forceoff")) {
2931  		ctrlval = CPU_SMT_FORCE_DISABLED;
2932  		num_threads = 1;
2933  	} else if (kstrtoint(buf, 10, &num_threads) == 0) {
2934  		if (num_threads == 1)
2935  			ctrlval = CPU_SMT_DISABLED;
2936  		else if (cpu_smt_num_threads_valid(num_threads))
2937  			ctrlval = CPU_SMT_ENABLED;
2938  		else
2939  			return -EINVAL;
2940  	} else {
2941  		return -EINVAL;
2942  	}
2943  
2944  	ret = lock_device_hotplug_sysfs();
2945  	if (ret)
2946  		return ret;
2947  
2948  	orig_threads = cpu_smt_num_threads;
2949  	cpu_smt_num_threads = num_threads;
2950  
2951  	force_off = ctrlval != cpu_smt_control && ctrlval == CPU_SMT_FORCE_DISABLED;
2952  
2953  	if (num_threads > orig_threads)
2954  		ret = cpuhp_smt_enable();
2955  	else if (num_threads < orig_threads || force_off)
2956  		ret = cpuhp_smt_disable(ctrlval);
2957  
2958  	unlock_device_hotplug();
2959  	return ret ? ret : count;
2960  }
2961  
2962  #else /* !CONFIG_HOTPLUG_SMT */
2963  static ssize_t
2964  __store_smt_control(struct device *dev, struct device_attribute *attr,
2965  		    const char *buf, size_t count)
2966  {
2967  	return -ENODEV;
2968  }
2969  #endif /* CONFIG_HOTPLUG_SMT */
2970  
2971  static const char *smt_states[] = {
2972  	[CPU_SMT_ENABLED]		= "on",
2973  	[CPU_SMT_DISABLED]		= "off",
2974  	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
2975  	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
2976  	[CPU_SMT_NOT_IMPLEMENTED]	= "notimplemented",
2977  };
2978  
2979  static ssize_t control_show(struct device *dev,
2980  			    struct device_attribute *attr, char *buf)
2981  {
2982  	const char *state = smt_states[cpu_smt_control];
2983  
2984  #ifdef CONFIG_HOTPLUG_SMT
2985  	/*
2986  	 * If SMT is enabled but not all threads are enabled then show the
2987  	 * number of threads. If all threads are enabled show "on". Otherwise
2988  	 * show the state name.
2989  	 */
2990  	if (cpu_smt_control == CPU_SMT_ENABLED &&
2991  	    cpu_smt_num_threads != cpu_smt_max_threads)
2992  		return sysfs_emit(buf, "%d\n", cpu_smt_num_threads);
2993  #endif
2994  
2995  	return sysfs_emit(buf, "%s\n", state);
2996  }
2997  
2998  static ssize_t control_store(struct device *dev, struct device_attribute *attr,
2999  			     const char *buf, size_t count)
3000  {
3001  	return __store_smt_control(dev, attr, buf, count);
3002  }
3003  static DEVICE_ATTR_RW(control);
3004  
3005  static ssize_t active_show(struct device *dev,
3006  			   struct device_attribute *attr, char *buf)
3007  {
3008  	return sysfs_emit(buf, "%d\n", sched_smt_active());
3009  }
3010  static DEVICE_ATTR_RO(active);
3011  
3012  static struct attribute *cpuhp_smt_attrs[] = {
3013  	&dev_attr_control.attr,
3014  	&dev_attr_active.attr,
3015  	NULL
3016  };
3017  
3018  static const struct attribute_group cpuhp_smt_attr_group = {
3019  	.attrs = cpuhp_smt_attrs,
3020  	.name = "smt",
3021  };
3022  
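/*
 * Illustrative usage note (not part of this file): the group above shows up as
 * /sys/devices/system/cpu/smt/. "control" accepts "on", "off", "forceoff" or,
 * where supported, a thread count; "active" reports whether the scheduler
 * currently sees SMT in use, e.g.
 *
 *	# echo off > /sys/devices/system/cpu/smt/control
 *	# cat /sys/devices/system/cpu/smt/active
 */
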
3023  static int __init cpu_smt_sysfs_init(void)
3024  {
3025  	struct device *dev_root;
3026  	int ret = -ENODEV;
3027  
3028  	dev_root = bus_get_dev_root(&cpu_subsys);
3029  	if (dev_root) {
3030  		ret = sysfs_create_group(&dev_root->kobj, &cpuhp_smt_attr_group);
3031  		put_device(dev_root);
3032  	}
3033  	return ret;
3034  }
3035  
3036  static int __init cpuhp_sysfs_init(void)
3037  {
3038  	struct device *dev_root;
3039  	int cpu, ret;
3040  
3041  	ret = cpu_smt_sysfs_init();
3042  	if (ret)
3043  		return ret;
3044  
3045  	dev_root = bus_get_dev_root(&cpu_subsys);
3046  	if (dev_root) {
3047  		ret = sysfs_create_group(&dev_root->kobj, &cpuhp_cpu_root_attr_group);
3048  		put_device(dev_root);
3049  		if (ret)
3050  			return ret;
3051  	}
3052  
3053  	for_each_possible_cpu(cpu) {
3054  		struct device *dev = get_cpu_device(cpu);
3055  
3056  		if (!dev)
3057  			continue;
3058  		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
3059  		if (ret)
3060  			return ret;
3061  	}
3062  	return 0;
3063  }
3064  device_initcall(cpuhp_sysfs_init);
3065  #endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
3066  
3067  /*
3068   * cpu_bit_bitmap[] is a special, "compressed" data structure that
3069   * represents all NR_CPUS single-bit values of the form 1<<nr.
3070   *
3071   * It is used by cpumask_of() to get a constant address to a CPU
3072   * mask value that has a single bit set only.
3073   */
3074  
3075  /* cpu_bit_bitmap[0] is empty - so we can back into it */
3076  #define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
3077  #define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
3078  #define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
3079  #define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
3080  
3081  const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
3082  
3083  	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
3084  	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
3085  #if BITS_PER_LONG > 32
3086  	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
3087  	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
3088  #endif
3089  };
3090  EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
3091  
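/*
 * Illustrative example (not part of this file): cpumask_of() hands out a
 * pointer into cpu_bit_bitmap[] instead of building a mask at runtime, e.g.
 *
 *	const struct cpumask *m = cpumask_of(3);
 *
 *	WARN_ON(!cpumask_test_cpu(3, m));	// only bit 3 is set
 *	WARN_ON(cpumask_weight(m) != 1);
 */
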
3092  const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
3093  EXPORT_SYMBOL(cpu_all_bits);
3094  
3095  #ifdef CONFIG_INIT_ALL_POSSIBLE
3096  struct cpumask __cpu_possible_mask __ro_after_init
3097  	= {CPU_BITS_ALL};
3098  #else
3099  struct cpumask __cpu_possible_mask __ro_after_init;
3100  #endif
3101  EXPORT_SYMBOL(__cpu_possible_mask);
3102  
3103  struct cpumask __cpu_online_mask __read_mostly;
3104  EXPORT_SYMBOL(__cpu_online_mask);
3105  
3106  struct cpumask __cpu_enabled_mask __read_mostly;
3107  EXPORT_SYMBOL(__cpu_enabled_mask);
3108  
3109  struct cpumask __cpu_present_mask __read_mostly;
3110  EXPORT_SYMBOL(__cpu_present_mask);
3111  
3112  struct cpumask __cpu_active_mask __read_mostly;
3113  EXPORT_SYMBOL(__cpu_active_mask);
3114  
3115  struct cpumask __cpu_dying_mask __read_mostly;
3116  EXPORT_SYMBOL(__cpu_dying_mask);
3117  
3118  atomic_t __num_online_cpus __read_mostly;
3119  EXPORT_SYMBOL(__num_online_cpus);
3120  
3121  void init_cpu_present(const struct cpumask *src)
3122  {
3123  	cpumask_copy(&__cpu_present_mask, src);
3124  }
3125  
3126  void init_cpu_possible(const struct cpumask *src)
3127  {
3128  	cpumask_copy(&__cpu_possible_mask, src);
3129  }
3130  
3131  void init_cpu_online(const struct cpumask *src)
3132  {
3133  	cpumask_copy(&__cpu_online_mask, src);
3134  }
3135  
3136  void set_cpu_online(unsigned int cpu, bool online)
3137  {
3138  	/*
3139  	 * atomic_inc/dec() is required to handle the horrid abuse of this
3140  	 * function by the reboot and kexec code which invoke it from
3141  	 * IPI/NMI broadcasts when shutting down CPUs. Invocation from
3142  	 * regular CPU hotplug is properly serialized.
3143  	 *
3144  	 * Note that the fact that __num_online_cpus is of type atomic_t
3145  	 * does not protect readers which are not serialized against
3146  	 * concurrent hotplug operations.
3147  	 */
3148  	if (online) {
3149  		if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
3150  			atomic_inc(&__num_online_cpus);
3151  	} else {
3152  		if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
3153  			atomic_dec(&__num_online_cpus);
3154  	}
3155  }
3156  
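/*
 * Illustrative example (not part of this file): readers that need a stable
 * online count across an operation must serialize against hotplug themselves,
 * e.g.
 *
 *	unsigned int nr;
 *
 *	cpus_read_lock();
 *	nr = num_online_cpus();
 *	// no CPU can come or go until cpus_read_unlock()
 *	cpus_read_unlock();
 */
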
3157  /*
3158   * Activate the first processor.
3159   */
3160  void __init boot_cpu_init(void)
3161  {
3162  	int cpu = smp_processor_id();
3163  
3164  	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
3165  	set_cpu_online(cpu, true);
3166  	set_cpu_active(cpu, true);
3167  	set_cpu_present(cpu, true);
3168  	set_cpu_possible(cpu, true);
3169  
3170  #ifdef CONFIG_SMP
3171  	__boot_cpu_id = cpu;
3172  #endif
3173  }
3174  
3175  /*
3176   * Must be called _AFTER_ setting up the per_cpu areas
3177   */
3178  void __init boot_cpu_hotplug_init(void)
3179  {
3180  #ifdef CONFIG_SMP
3181  	cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
3182  	atomic_set(this_cpu_ptr(&cpuhp_state.ap_sync_state), SYNC_STATE_ONLINE);
3183  #endif
3184  	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
3185  	this_cpu_write(cpuhp_state.target, CPUHP_ONLINE);
3186  }
3187  
3188  #ifdef CONFIG_CPU_MITIGATIONS
3189  /*
3190   * These are used for a global "mitigations=" cmdline option for toggling
3191   * optional CPU mitigations.
3192   */
3193  enum cpu_mitigations {
3194  	CPU_MITIGATIONS_OFF,
3195  	CPU_MITIGATIONS_AUTO,
3196  	CPU_MITIGATIONS_AUTO_NOSMT,
3197  };
3198  
3199  static enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
3200  
3201  static int __init mitigations_parse_cmdline(char *arg)
3202  {
3203  	if (!strcmp(arg, "off"))
3204  		cpu_mitigations = CPU_MITIGATIONS_OFF;
3205  	else if (!strcmp(arg, "auto"))
3206  		cpu_mitigations = CPU_MITIGATIONS_AUTO;
3207  	else if (!strcmp(arg, "auto,nosmt"))
3208  		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
3209  	else
3210  		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
3211  			arg);
3212  
3213  	return 0;
3214  }
3215  
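/*
 * Illustrative usage note (not part of this file): on the kernel command line
 *
 *	mitigations=off		- disable all optional CPU mitigations
 *	mitigations=auto	- the default; mitigate, leave SMT enabled
 *	mitigations=auto,nosmt	- mitigate and disable SMT where needed
 */
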
3216  /* mitigations=off */
3217  bool cpu_mitigations_off(void)
3218  {
3219  	return cpu_mitigations == CPU_MITIGATIONS_OFF;
3220  }
3221  EXPORT_SYMBOL_GPL(cpu_mitigations_off);
3222  
3223  /* mitigations=auto,nosmt */
3224  bool cpu_mitigations_auto_nosmt(void)
3225  {
3226  	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
3227  }
3228  EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
3229  #else
3230  static int __init mitigations_parse_cmdline(char *arg)
3231  {
3232  	pr_crit("Kernel compiled without mitigations, ignoring 'mitigations'; system may still be vulnerable\n");
3233  	return 0;
3234  }
3235  #endif
3236  early_param("mitigations", mitigations_parse_cmdline);
3237