Lines Matching +full:cpu +full:- +full:nr

1 // SPDX-License-Identifier: GPL-2.0
13 * The code outside of smp.c uses logical cpu numbers; only smp.c does
14 * the translation of logical to physical cpu ids. All new code that
15 * operates on physical cpu numbers needs to go into smp.c.
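
A minimal sketch of the convention described above (the matches in this listing are from arch/s390/kernel/smp.c), assuming the two helpers shown further down, smp_cpu_get_cpu_address() and smp_find_processor_id(), are reachable via <asm/smp.h>: everything outside smp.c passes the logical number around, and only smp.c ever resolves it to the physical CPU address.

#include <linux/bug.h>
#include <linux/smp.h>
#include <asm/smp.h>	/* assumed to declare the two lookup helpers used below */

/* Illustrative sketch only, not part of smp.c. */
static void cpu_number_convention_sketch(void)
{
	int cpu = get_cpu();				/* logical number, used everywhere */
	int addr = smp_cpu_get_cpu_address(cpu);	/* physical address, smp.c internal */

	/* Reverse lookup: the physical address maps back to the logical number (-1 if unknown). */
	WARN_ON(smp_find_processor_id(addr) != cpu);
	put_cpu();
}
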
18 #define KMSG_COMPONENT "cpu"
34 #include <linux/cpu.h>
40 #include <asm/access-regs.h>
41 #include <asm/asm-offsets.h>
82 * Pointer to the pcpu area of the boot CPU. This is required when a restart
83 * interrupt is triggered on an offline CPU. For that case accessing percpu
99 static unsigned int smp_max_threads __initdata = -1U;
135 cc = __pcpu_sigp(pcpu->address, order, parm, NULL); in pcpu_sigp_retry()
148 if (__pcpu_sigp(pcpu->address, SIGP_SENSE, in pcpu_stopped()
156 if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING, in pcpu_running()
159 /* Status stored condition code is equivalent to cpu not running. */ in pcpu_running()
164 * Find struct pcpu by cpu address.
168 int cpu; in pcpu_find_address() local
170 for_each_cpu(cpu, mask) in pcpu_find_address()
171 if (per_cpu(pcpu_devices, cpu).address == address) in pcpu_find_address()
172 return &per_cpu(pcpu_devices, cpu); in pcpu_find_address()
178 if (test_and_set_bit(ec_bit, &pcpu->ec_mask)) in pcpu_ec_call()
180 pcpu->ec_clk = get_tod_clock_fast(); in pcpu_ec_call()
184 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) in pcpu_alloc_lowcore() argument
196 memset((char *) lc + 512, 0, sizeof(*lc) - 512); in pcpu_alloc_lowcore()
197 lc->async_stack = async_stack + STACK_INIT_OFFSET; in pcpu_alloc_lowcore()
198 lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET; in pcpu_alloc_lowcore()
199 lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET; in pcpu_alloc_lowcore()
200 lc->cpu_nr = cpu; in pcpu_alloc_lowcore()
201 lc->spinlock_lockval = arch_spin_lockval(cpu); in pcpu_alloc_lowcore()
202 lc->spinlock_index = 0; in pcpu_alloc_lowcore()
203 lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW); in pcpu_alloc_lowcore()
204 lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); in pcpu_alloc_lowcore()
205 lc->preempt_count = PREEMPT_DISABLED; in pcpu_alloc_lowcore()
206 if (nmi_alloc_mcesa(&lc->mcesad)) in pcpu_alloc_lowcore()
208 if (abs_lowcore_map(cpu, lc, true)) in pcpu_alloc_lowcore()
210 lowcore_ptr[cpu] = lc; in pcpu_alloc_lowcore()
215 nmi_free_mcesa(&lc->mcesad); in pcpu_alloc_lowcore()
221 return -ENOMEM; in pcpu_alloc_lowcore()
224 static void pcpu_free_lowcore(struct pcpu *pcpu, int cpu) in pcpu_free_lowcore() argument
229 lc = lowcore_ptr[cpu]; in pcpu_free_lowcore()
230 nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET; in pcpu_free_lowcore()
231 async_stack = lc->async_stack - STACK_INIT_OFFSET; in pcpu_free_lowcore()
232 mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET; in pcpu_free_lowcore()
234 lowcore_ptr[cpu] = NULL; in pcpu_free_lowcore()
235 abs_lowcore_unmap(cpu); in pcpu_free_lowcore()
236 nmi_free_mcesa(&lc->mcesad); in pcpu_free_lowcore()
243 static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) in pcpu_prepare_secondary() argument
247 lc = lowcore_ptr[cpu]; in pcpu_prepare_secondary()
248 cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask); in pcpu_prepare_secondary()
249 cpumask_set_cpu(cpu, mm_cpumask(&init_mm)); in pcpu_prepare_secondary()
250 lc->cpu_nr = cpu; in pcpu_prepare_secondary()
251 lc->pcpu = (unsigned long)pcpu; in pcpu_prepare_secondary()
252 lc->restart_flags = RESTART_FLAG_CTLREGS; in pcpu_prepare_secondary()
253 lc->spinlock_lockval = arch_spin_lockval(cpu); in pcpu_prepare_secondary()
254 lc->spinlock_index = 0; in pcpu_prepare_secondary()
255 lc->percpu_offset = __per_cpu_offset[cpu]; in pcpu_prepare_secondary()
256 lc->kernel_asce = get_lowcore()->kernel_asce; in pcpu_prepare_secondary()
257 lc->user_asce = s390_invalid_asce; in pcpu_prepare_secondary()
258 lc->user_timer = lc->system_timer = in pcpu_prepare_secondary()
259 lc->steal_timer = lc->avg_steal_timer = 0; in pcpu_prepare_secondary()
261 memcpy(lc->cregs_save_area, abs_lc->cregs_save_area, sizeof(lc->cregs_save_area)); in pcpu_prepare_secondary()
263 lc->cregs_save_area[1] = lc->user_asce; in pcpu_prepare_secondary()
264 lc->cregs_save_area[7] = lc->user_asce; in pcpu_prepare_secondary()
265 save_access_regs((unsigned int *) lc->access_regs_save_area); in pcpu_prepare_secondary()
266 arch_spin_lock_setup(cpu); in pcpu_prepare_secondary()
269 static void pcpu_attach_task(int cpu, struct task_struct *tsk) in pcpu_attach_task() argument
273 lc = lowcore_ptr[cpu]; in pcpu_attach_task()
274 lc->kernel_stack = (unsigned long)task_stack_page(tsk) + STACK_INIT_OFFSET; in pcpu_attach_task()
275 lc->current_task = (unsigned long)tsk; in pcpu_attach_task()
276 lc->lpp = LPP_MAGIC; in pcpu_attach_task()
277 lc->current_pid = tsk->pid; in pcpu_attach_task()
278 lc->user_timer = tsk->thread.user_timer; in pcpu_attach_task()
279 lc->guest_timer = tsk->thread.guest_timer; in pcpu_attach_task()
280 lc->system_timer = tsk->thread.system_timer; in pcpu_attach_task()
281 lc->hardirq_timer = tsk->thread.hardirq_timer; in pcpu_attach_task()
282 lc->softirq_timer = tsk->thread.softirq_timer; in pcpu_attach_task()
283 lc->steal_timer = 0; in pcpu_attach_task()
286 static void pcpu_start_fn(int cpu, void (*func)(void *), void *data) in pcpu_start_fn() argument
290 lc = lowcore_ptr[cpu]; in pcpu_start_fn()
291 lc->restart_stack = lc->kernel_stack; in pcpu_start_fn()
292 lc->restart_fn = (unsigned long) func; in pcpu_start_fn()
293 lc->restart_data = (unsigned long) data; in pcpu_start_fn()
294 lc->restart_source = -1U; in pcpu_start_fn()
295 pcpu_sigp_retry(per_cpu_ptr(&pcpu_devices, cpu), SIGP_RESTART, 0); in pcpu_start_fn()
301 * Call function via PSW restart on pcpu and stop the current cpu.
308 static void pcpu_delegate(struct pcpu *pcpu, int cpu, in pcpu_delegate() argument
315 lc = lowcore_ptr[cpu]; in pcpu_delegate()
318 if (pcpu->address == source_cpu) { in pcpu_delegate()
322 /* Stop target cpu (if func returns this stops the current cpu). */ in pcpu_delegate()
325 /* Restart func on the target cpu and stop the current cpu. */ in pcpu_delegate()
327 lc->restart_stack = stack; in pcpu_delegate()
328 lc->restart_fn = (unsigned long)func; in pcpu_delegate()
329 lc->restart_data = (unsigned long)data; in pcpu_delegate()
330 lc->restart_source = source_cpu; in pcpu_delegate()
333 abs_lc->restart_stack = stack; in pcpu_delegate()
334 abs_lc->restart_fn = (unsigned long)func; in pcpu_delegate()
335 abs_lc->restart_data = (unsigned long)data; in pcpu_delegate()
336 abs_lc->restart_source = source_cpu; in pcpu_delegate()
340 "0: sigp 0,%0,%2 # sigp restart to target cpu\n" in pcpu_delegate()
342 "1: sigp 0,%1,%3 # sigp stop to current cpu\n" in pcpu_delegate()
344 : : "d" (pcpu->address), "d" (source_cpu), in pcpu_delegate()
351 * Enable additional logical cpus for multi-threading.
371 * Call function on the ipl CPU.
377 if (ipl_pcpu->address == stap()) in smp_call_ipl_cpu()
380 pcpu_delegate(ipl_pcpu, 0, func, data, lc->nodat_stack); in smp_call_ipl_cpu()
385 int cpu; in smp_find_processor_id() local
387 for_each_present_cpu(cpu) in smp_find_processor_id()
388 if (per_cpu(pcpu_devices, cpu).address == address) in smp_find_processor_id()
389 return cpu; in smp_find_processor_id()
390 return -1; in smp_find_processor_id()
398 bool notrace arch_vcpu_is_preempted(int cpu) in arch_vcpu_is_preempted() argument
400 if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu)) in arch_vcpu_is_preempted()
402 if (pcpu_running(per_cpu_ptr(&pcpu_devices, cpu))) in arch_vcpu_is_preempted()
408 void notrace smp_yield_cpu(int cpu) in smp_yield_cpu() argument
414 : : "d" (per_cpu(pcpu_devices, cpu).address)); in smp_yield_cpu()
427 int cpu; in smp_emergency_stop() local
434 for_each_cpu(cpu, &cpumask) { in smp_emergency_stop()
435 struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu); in smp_emergency_stop()
436 set_bit(ec_stop_cpu, &pcpu->ec_mask); in smp_emergency_stop()
437 while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL, in smp_emergency_stop()
443 for_each_cpu(cpu, &cpumask) in smp_emergency_stop()
444 if (pcpu_stopped(per_cpu_ptr(&pcpu_devices, cpu))) in smp_emergency_stop()
445 cpumask_clear_cpu(cpu, &cpumask); in smp_emergency_stop()
460 int cpu; in smp_send_stop() local
472 for_each_online_cpu(cpu) { in smp_send_stop()
473 if (cpu == smp_processor_id()) in smp_send_stop()
475 pcpu = per_cpu_ptr(&pcpu_devices, cpu); in smp_send_stop()
513 int cpu; in arch_send_call_function_ipi_mask() local
515 for_each_cpu(cpu, mask) in arch_send_call_function_ipi_mask()
516 pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single); in arch_send_call_function_ipi_mask()
519 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
521 pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single); in arch_send_call_function_single_ipi()
525 * this function sends a 'reschedule' IPI to another CPU.
529 void arch_smp_send_reschedule(int cpu) in arch_smp_send_reschedule() argument
531 pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_schedule); in arch_smp_send_reschedule()
543 int smp_store_status(int cpu) in smp_store_status() argument
549 pcpu = per_cpu_ptr(&pcpu_devices, cpu); in smp_store_status()
550 lc = lowcore_ptr[cpu]; in smp_store_status()
551 pa = __pa(&lc->floating_pt_save_area); in smp_store_status()
552 if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS, in smp_store_status()
554 return -EIO; in smp_store_status()
557 pa = lc->mcesad & MCESA_ORIGIN_MASK; in smp_store_status()
559 pa |= lc->mcesad & MCESA_LC_MASK; in smp_store_status()
560 if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS, in smp_store_status()
562 return -EIO; in smp_store_status()
567 * Collect CPU state of the previous, crashed system.
571 * The state for all CPUs except the boot CPU needs to be collected
572 * with sigp stop-and-store-status. The boot CPU state is located in
574 * will copy the boot CPU state from the HSA.
575 * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
577 * The state for all CPUs except the boot CPU needs to be collected
578 * with sigp stop-and-store-status. The firmware or the boot-loader
579 * stored the registers of the boot CPU in the absolute lowcore in the
581 * 3) kdump or stand-alone kdump for DASD
583 * The state for all CPUs except the boot CPU needs to be collected
584 * with sigp stop-and-store-status. The kexec code or the boot-loader
585 * stored the registers of the boot CPU in the memory of the old system.
587 * Note that the legacy kdump mode where the old kernel stored the CPU states
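
A heavily simplified sketch of the step shared by all three cases above; the function name is invented, and the real smp_save_dump_secondary_cpus() walks the previous system's CPU addresses and copies the results into save areas, but the core operation is one SIGP STOP AND STORE STATUS per physical address, skipping the CPU that runs the dump kernel:

#include <linux/cpumask.h>
#include <asm/processor.h>	/* stap() */
#include <asm/sigp.h>		/* __pcpu_sigp(), SIGP_STOP_AND_STORE_STATUS */
#include <asm/smp.h>		/* smp_cpu_get_cpu_address() (assumed) */

/* Illustrative sketch only; busy retries and save-area handling are omitted. */
static void save_secondary_cpu_state_sketch(void)
{
	int boot_addr = stap();	/* physical address of the CPU running this code */
	int cpu;

	for_each_present_cpu(cpu) {
		int addr = smp_cpu_get_cpu_address(cpu);

		if (addr == boot_addr)
			continue;	/* boot CPU state comes from HSA/lowcore, see 1)-3) */
		/* the real code retries SIGP_CC_BUSY via __pcpu_sigp_relax() */
		__pcpu_sigp(addr, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
	}
}
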
627 /* Set multi-threading state to the previous system. */ in smp_save_dump_secondary_cpus()
651 void smp_cpu_set_polarization(int cpu, int val) in smp_cpu_set_polarization() argument
653 per_cpu(pcpu_devices, cpu).polarization = val; in smp_cpu_set_polarization()
656 int smp_cpu_get_polarization(int cpu) in smp_cpu_get_polarization() argument
658 return per_cpu(pcpu_devices, cpu).polarization; in smp_cpu_get_polarization()
661 void smp_cpu_set_capacity(int cpu, unsigned long val) in smp_cpu_set_capacity() argument
663 per_cpu(pcpu_devices, cpu).capacity = val; in smp_cpu_set_capacity()
666 unsigned long smp_cpu_get_capacity(int cpu) in smp_cpu_get_capacity() argument
668 return per_cpu(pcpu_devices, cpu).capacity; in smp_cpu_get_capacity()
671 void smp_set_core_capacity(int cpu, unsigned long val) in smp_set_core_capacity() argument
675 cpu = smp_get_base_cpu(cpu); in smp_set_core_capacity()
676 for (i = cpu; (i <= cpu + smp_cpu_mtid) && (i < nr_cpu_ids); i++) in smp_set_core_capacity()
680 int smp_cpu_get_cpu_address(int cpu) in smp_cpu_get_cpu_address() argument
682 return per_cpu(pcpu_devices, cpu).address; in smp_cpu_get_cpu_address()
698 info->core[info->configured].core_id = in smp_get_core_info()
700 info->configured++; in smp_get_core_info()
702 info->combined = info->configured; in smp_get_core_info()
710 int cpu, nr, i; in smp_add_core() local
713 nr = 0; in smp_add_core()
714 if (sclp.has_core_type && core->type != boot_core_type) in smp_add_core()
715 return nr; in smp_add_core()
716 cpu = cpumask_first(avail); in smp_add_core()
717 address = core->core_id << smp_cpu_mt_shift; in smp_add_core()
718 for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) { in smp_add_core()
721 pcpu = per_cpu_ptr(&pcpu_devices, cpu); in smp_add_core()
722 pcpu->address = address + i; in smp_add_core()
724 pcpu->state = CPU_STATE_CONFIGURED; in smp_add_core()
726 pcpu->state = CPU_STATE_STANDBY; in smp_add_core()
727 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); in smp_add_core()
728 smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH); in smp_add_core()
729 set_cpu_present(cpu, true); in smp_add_core()
730 if (!early && arch_register_cpu(cpu)) in smp_add_core()
731 set_cpu_present(cpu, false); in smp_add_core()
733 nr++; in smp_add_core()
734 cpumask_clear_cpu(cpu, avail); in smp_add_core()
735 cpu = cpumask_next(cpu, avail); in smp_add_core()
737 return nr; in smp_add_core()
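
A short worked example of the thread-address arithmetic in smp_add_core() above; the helper name and the SMT-2 numbers are purely illustrative:

#include <linux/types.h>

/*
 * Illustrative only: with SMT-2 (smp_cpu_mt_shift == 1, smp_cpu_mtid == 1)
 * a core with core_id 5 owns the physical addresses 10 and 11, which
 * smp_add_core() hands out to consecutive logical CPUs.
 */
static u16 sketch_thread_address(u16 core_id, unsigned int mt_shift, unsigned int thread)
{
	return (core_id << mt_shift) + thread;	/* (5, 1, 0) -> 10, (5, 1, 1) -> 11 */
}
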
746 int nr, i; in __smp_rescan_cpus() local
750 nr = 0; in __smp_rescan_cpus()
753 * Add IPL core first (which got logical CPU number 0) to make sure in __smp_rescan_cpus()
754 * that all SMT threads get subsequent logical CPU numbers. in __smp_rescan_cpus()
758 for (i = 0; i < info->configured; i++) { in __smp_rescan_cpus()
759 core = &info->core[i]; in __smp_rescan_cpus()
760 if (core->core_id == core_id) { in __smp_rescan_cpus()
761 nr += smp_add_core(core, &avail, true, early); in __smp_rescan_cpus()
766 for (i = 0; i < info->combined; i++) { in __smp_rescan_cpus()
767 configured = i < info->configured; in __smp_rescan_cpus()
768 nr += smp_add_core(&info->core[i], &avail, configured, early); in __smp_rescan_cpus()
772 return nr; in __smp_rescan_cpus()
777 unsigned int cpu, mtid, c_cpus, s_cpus; in smp_detect_cpus() local
781 /* Get CPU information */ in smp_detect_cpus()
784 /* Find boot CPU type */ in smp_detect_cpus()
787 for (cpu = 0; cpu < info->combined; cpu++) in smp_detect_cpus()
788 if (info->core[cpu].core_id == address) { in smp_detect_cpus()
789 /* The boot cpu dictates the cpu type. */ in smp_detect_cpus()
790 boot_core_type = info->core[cpu].type; in smp_detect_cpus()
793 if (cpu >= info->combined) in smp_detect_cpus()
794 panic("Could not find boot CPU type"); in smp_detect_cpus()
797 /* Set multi-threading state for the current system */ in smp_detect_cpus()
799 mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1; in smp_detect_cpus()
805 for (cpu = 0; cpu < info->combined; cpu++) { in smp_detect_cpus()
807 info->core[cpu].type != boot_core_type) in smp_detect_cpus()
809 if (cpu < info->configured) in smp_detect_cpus()
824 int cpu = raw_smp_processor_id(); in smp_start_secondary() local
826 lc->last_update_clock = get_tod_clock(); in smp_start_secondary()
827 lc->restart_stack = (unsigned long)restart_stack; in smp_start_secondary()
828 lc->restart_fn = (unsigned long)do_restart; in smp_start_secondary()
829 lc->restart_data = 0; in smp_start_secondary()
830 lc->restart_source = -1U; in smp_start_secondary()
831 lc->restart_flags = 0; in smp_start_secondary()
832 restore_access_regs(lc->access_regs_save_area); in smp_start_secondary()
834 rcutree_report_cpu_starting(cpu); in smp_start_secondary()
839 cpumask_set_cpu(cpu, &cpu_setup_mask); in smp_start_secondary()
841 notify_cpu_starting(cpu); in smp_start_secondary()
842 if (topology_cpu_dedicated(cpu)) in smp_start_secondary()
846 set_cpu_online(cpu, true); in smp_start_secondary()
853 int __cpu_up(unsigned int cpu, struct task_struct *tidle) in __cpu_up() argument
855 struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu); in __cpu_up()
858 if (pcpu->state != CPU_STATE_CONFIGURED) in __cpu_up()
859 return -EIO; in __cpu_up()
862 return -EIO; in __cpu_up()
864 rc = pcpu_alloc_lowcore(pcpu, cpu); in __cpu_up()
869 * until new CPU has initialized control registers. in __cpu_up()
872 pcpu_prepare_secondary(pcpu, cpu); in __cpu_up()
873 pcpu_attach_task(cpu, tidle); in __cpu_up()
874 pcpu_start_fn(cpu, smp_start_secondary, NULL); in __cpu_up()
875 /* Wait until cpu puts itself in the online & active maps */ in __cpu_up()
876 while (!cpu_online(cpu)) in __cpu_up()
894 int cpu; in __cpu_disable() local
898 cpu = smp_processor_id(); in __cpu_disable()
899 set_cpu_online(cpu, false); in __cpu_disable()
900 cpumask_clear_cpu(cpu, &cpu_setup_mask); in __cpu_disable()
902 /* Disable pseudo page faults on this cpu. */ in __cpu_disable()
914 void __cpu_die(unsigned int cpu) in __cpu_die() argument
918 /* Wait until target cpu is down */ in __cpu_die()
919 pcpu = per_cpu_ptr(&pcpu_devices, cpu); in __cpu_die()
922 pcpu_free_lowcore(pcpu, cpu); in __cpu_die()
923 cpumask_clear_cpu(cpu, mm_cpumask(&init_mm)); in __cpu_die()
924 cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask); in __cpu_die()
925 pcpu->flags = 0; in __cpu_die()
937 unsigned int possible, sclp_max, cpu; in smp_fill_possible_mask() local
944 for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) in smp_fill_possible_mask()
945 set_cpu_possible(cpu, true); in smp_fill_possible_mask()
964 lc->percpu_offset = __per_cpu_offset[0]; in smp_prepare_boot_cpu()
966 ipl_pcpu->state = CPU_STATE_CONFIGURED; in smp_prepare_boot_cpu()
967 lc->pcpu = (unsigned long)ipl_pcpu; in smp_prepare_boot_cpu()
976 lc->cpu_nr = 0; in smp_setup_processor_id()
978 lc->spinlock_lockval = arch_spin_lockval(0); in smp_setup_processor_id()
979 lc->spinlock_index = 0; in smp_setup_processor_id()
999 count = sysfs_emit(buf, "%d\n", per_cpu(pcpu_devices, dev->id).state); in cpu_configure_show()
1009 int cpu, val, rc, i; in cpu_configure_store() local
1013 return -EINVAL; in cpu_configure_store()
1015 return -EINVAL; in cpu_configure_store()
1018 rc = -EBUSY; in cpu_configure_store()
1020 cpu = dev->id; in cpu_configure_store()
1021 cpu = smp_get_base_cpu(cpu); in cpu_configure_store()
1023 if (cpu_online(cpu + i)) in cpu_configure_store()
1025 pcpu = per_cpu_ptr(&pcpu_devices, cpu); in cpu_configure_store()
1029 if (pcpu->state != CPU_STATE_CONFIGURED) in cpu_configure_store()
1031 rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift); in cpu_configure_store()
1035 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) in cpu_configure_store()
1037 per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_STANDBY; in cpu_configure_store()
1038 smp_cpu_set_polarization(cpu + i, in cpu_configure_store()
1044 if (pcpu->state != CPU_STATE_STANDBY) in cpu_configure_store()
1046 rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift); in cpu_configure_store()
1050 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) in cpu_configure_store()
1052 per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_CONFIGURED; in cpu_configure_store()
1053 smp_cpu_set_polarization(cpu + i, in cpu_configure_store()
1071 return sysfs_emit(buf, "%d\n", per_cpu(pcpu_devices, dev->id).address); in show_cpu_address()
1095 static int smp_cpu_online(unsigned int cpu) in smp_cpu_online() argument
1097 struct cpu *c = per_cpu_ptr(&cpu_devices, cpu); in smp_cpu_online()
1099 return sysfs_create_group(&c->dev.kobj, &cpu_online_attr_group); in smp_cpu_online()
1102 static int smp_cpu_pre_down(unsigned int cpu) in smp_cpu_pre_down() argument
1104 struct cpu *c = per_cpu_ptr(&cpu_devices, cpu); in smp_cpu_pre_down()
1106 sysfs_remove_group(&c->dev.kobj, &cpu_online_attr_group); in smp_cpu_pre_down()
1110 bool arch_cpu_is_hotpluggable(int cpu) in arch_cpu_is_hotpluggable() argument
1112 return !!cpu; in arch_cpu_is_hotpluggable()
1115 int arch_register_cpu(int cpu) in arch_register_cpu() argument
1117 struct cpu *c = per_cpu_ptr(&cpu_devices, cpu); in arch_register_cpu()
1120 c->hotpluggable = arch_cpu_is_hotpluggable(cpu); in arch_register_cpu()
1121 rc = register_cpu(c, cpu); in arch_register_cpu()
1124 rc = sysfs_create_group(&c->dev.kobj, &cpu_common_attr_group); in arch_register_cpu()
1133 sysfs_remove_group(&c->dev.kobj, &cpu_common_attr_group); in arch_register_cpu()
1143 int nr; in smp_rescan_cpus() local
1147 return -ENOMEM; in smp_rescan_cpus()
1149 nr = __smp_rescan_cpus(info, early); in smp_rescan_cpus()
1151 if (nr) in smp_rescan_cpus()