Lines matching the full identifier "cpu" in smp.c (s390 SMP code). Each entry shows the source line number, the matching line, and the enclosing function; "local" and "argument" mark entries where the match is a declaration. Lines that did not contain the identifier are omitted, so comments and function bodies appear as fragments.

13  * The code outside of smp.c uses logical cpu numbers, only smp.c does
14 * the translation of logical to physical cpu ids. All new code that
15 * operates on physical cpu numbers needs to go into smp.c.
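
A minimal sketch of what that split means in practice, using illustrative names rather than the kernel's actual data layout: logical CPU numbers index ordinary per-CPU data, while the physical CPU address (the sigp target) is private to smp.c and is only crossed via lookups like the two below.

    /* Illustrative model only; smp.c keeps the address inside struct pcpu. */
    #define MODEL_NR_CPUS 64

    struct pcpu_model {
            unsigned short address;         /* physical CPU address */
    };

    static struct pcpu_model pcpu_model[MODEL_NR_CPUS];

    /* logical -> physical: a table lookup, cf. smp_cpu_get_cpu_address() */
    static int model_cpu_address(int cpu)
    {
            return pcpu_model[cpu].address;
    }

    /* physical -> logical: a linear scan, cf. smp_find_processor_id() */
    static int model_find_cpu(unsigned short address)
    {
            int cpu;

            for (cpu = 0; cpu < MODEL_NR_CPUS; cpu++)
                    if (pcpu_model[cpu].address == address)
                            return cpu;
            return -1;
    }
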
18 #define KMSG_COMPONENT "cpu"
34 #include <linux/cpu.h>
82 * Pointer to the pcpu area of the boot CPU. This is required when a restart
83 * interrupt is triggered on an offline CPU. For that case accessing percpu
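
The match filter cuts this comment off mid-sentence; the recoverable gist, hedged: an offline CPU has no usable percpu base, so the restart interrupt handler cannot go through the normal per-CPU accessors and needs a plain pointer to the boot CPU's pcpu area instead. In the context of smp.c (where struct pcpu is defined) the pattern is roughly:

    /* Name is an assumption; kept as a raw pointer precisely because   */
    /* per_cpu_ptr()/this_cpu_ptr() are unusable on an offline CPU.     */
    static struct pcpu *boot_pcpu;          /* set once during early boot */
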
159 /* Status stored condition code is equivalent to cpu not running. */ in pcpu_running()
164 * Find struct pcpu by cpu address.
168 int cpu; in pcpu_find_address() local
170 for_each_cpu(cpu, mask) in pcpu_find_address()
171 if (per_cpu(pcpu_devices, cpu).address == address) in pcpu_find_address()
172 return &per_cpu(pcpu_devices, cpu); in pcpu_find_address()
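
For illustration, a hedged usage of pcpu_find_address(), assuming the signature the fragments imply (a cpumask to scan plus a physical CPU address):

    static bool address_is_online(u16 address)
    {
            /* NULL means no CPU in the mask has this physical address */
            return pcpu_find_address(cpu_online_mask, address) != NULL;
    }
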
184 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) in pcpu_alloc_lowcore() argument
200 lc->cpu_nr = cpu; in pcpu_alloc_lowcore()
201 lc->spinlock_lockval = arch_spin_lockval(cpu); in pcpu_alloc_lowcore()
208 if (abs_lowcore_map(cpu, lc, true)) in pcpu_alloc_lowcore()
210 lowcore_ptr[cpu] = lc; in pcpu_alloc_lowcore()
224 static void pcpu_free_lowcore(struct pcpu *pcpu, int cpu) in pcpu_free_lowcore() argument
229 lc = lowcore_ptr[cpu]; in pcpu_free_lowcore()
234 lowcore_ptr[cpu] = NULL; in pcpu_free_lowcore()
235 abs_lowcore_unmap(cpu); in pcpu_free_lowcore()
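
Read together, the alloc/free fragments form a symmetric pair; a hedged summary of the ordering they imply (not verbatim code):

    /*
     * pcpu_alloc_lowcore(pcpu, cpu):
     *      lc->cpu_nr = cpu;                  bind lowcore to logical cpu
     *      lc->spinlock_lockval = arch_spin_lockval(cpu);
     *      abs_lowcore_map(cpu, lc, true);    map into the absolute area
     *      lowcore_ptr[cpu] = lc;             publish last
     *
     * pcpu_free_lowcore(pcpu, cpu):
     *      lc = lowcore_ptr[cpu];
     *      lowcore_ptr[cpu] = NULL;           unpublish first
     *      abs_lowcore_unmap(cpu);            then drop the mapping
     */
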
243 static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) in pcpu_prepare_secondary() argument
247 lc = lowcore_ptr[cpu]; in pcpu_prepare_secondary()
248 cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask); in pcpu_prepare_secondary()
249 cpumask_set_cpu(cpu, mm_cpumask(&init_mm)); in pcpu_prepare_secondary()
250 lc->cpu_nr = cpu; in pcpu_prepare_secondary()
253 lc->spinlock_lockval = arch_spin_lockval(cpu); in pcpu_prepare_secondary()
255 lc->percpu_offset = __per_cpu_offset[cpu]; in pcpu_prepare_secondary()
266 arch_spin_lock_setup(cpu); in pcpu_prepare_secondary()
269 static void pcpu_attach_task(int cpu, struct task_struct *tsk) in pcpu_attach_task() argument
273 lc = lowcore_ptr[cpu]; in pcpu_attach_task()
286 static void pcpu_start_fn(int cpu, void (*func)(void *), void *data) in pcpu_start_fn() argument
290 lc = lowcore_ptr[cpu]; in pcpu_start_fn()
295 pcpu_sigp_retry(per_cpu_ptr(&pcpu_devices, cpu), SIGP_RESTART, 0); in pcpu_start_fn()
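
The start path is a classic "park the entry point, then kick" sequence. A hedged sketch, assuming struct lowcore carries restart_fn/restart_data members (the exact field names are an assumption here):

    static void start_fn_sketch(int cpu, void (*func)(void *), void *data)
    {
            struct lowcore *lc = lowcore_ptr[cpu];

            lc->restart_fn = (unsigned long)func;   /* assumed field name */
            lc->restart_data = (unsigned long)data; /* assumed field name */
            /* SIGP_RESTART makes the stopped CPU resume via its restart PSW */
            pcpu_sigp_retry(per_cpu_ptr(&pcpu_devices, cpu), SIGP_RESTART, 0);
    }
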
301 * Call function via PSW restart on pcpu and stop the current cpu.
308 static void pcpu_delegate(struct pcpu *pcpu, int cpu, in pcpu_delegate() argument
315 lc = lowcore_ptr[cpu]; in pcpu_delegate()
322 /* Stop target cpu (if func returns this stops the current cpu). */ in pcpu_delegate()
325 /* Restart func on the target cpu and stop the current cpu. */ in pcpu_delegate()
340 "0: sigp 0,%0,%2 # sigp restart to target cpu\n" in pcpu_delegate()
342 "1: sigp 0,%1,%3 # sigp stop to current cpu\n" in pcpu_delegate()
371 * Call function on the ipl CPU.
385 int cpu; in smp_find_processor_id() local
387 for_each_present_cpu(cpu) in smp_find_processor_id()
388 if (per_cpu(pcpu_devices, cpu).address == address) in smp_find_processor_id()
389 return cpu; in smp_find_processor_id()
398 bool notrace arch_vcpu_is_preempted(int cpu) in arch_vcpu_is_preempted() argument
400 if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu)) in arch_vcpu_is_preempted()
402 if (pcpu_running(per_cpu_ptr(&pcpu_devices, cpu))) in arch_vcpu_is_preempted()
408 void notrace smp_yield_cpu(int cpu) in smp_yield_cpu() argument
414 : : "d" (per_cpu(pcpu_devices, cpu).address)); in smp_yield_cpu()
427 int cpu; in smp_emergency_stop() local
434 for_each_cpu(cpu, &cpumask) { in smp_emergency_stop()
435 struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu); in smp_emergency_stop()
443 for_each_cpu(cpu, &cpumask) in smp_emergency_stop()
444 if (pcpu_stopped(per_cpu_ptr(&pcpu_devices, cpu))) in smp_emergency_stop()
445 cpumask_clear_cpu(cpu, &cpumask); in smp_emergency_stop()
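
The emergency-stop fragments show a two-phase protocol: signal every CPU in the mask, then poll until each one reports stopped. A hedged shape of it, assuming the emergency-signal sigp order (which matches the function's name); the real loop also enforces a deadline that the match filter elided:

    for_each_cpu(cpu, &cpumask)             /* phase 1: kick all targets */
            pcpu_sigp_retry(per_cpu_ptr(&pcpu_devices, cpu),
                            SIGP_EMERGENCY_SIGNAL, 0);
    while (!cpumask_empty(&cpumask)) {      /* phase 2: wait for them */
            for_each_cpu(cpu, &cpumask)
                    if (pcpu_stopped(per_cpu_ptr(&pcpu_devices, cpu)))
                            cpumask_clear_cpu(cpu, &cpumask);
            udelay(10);     /* plus the timeout check in the real code */
    }
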
460 int cpu; in smp_send_stop() local
472 for_each_online_cpu(cpu) { in smp_send_stop()
473 if (cpu == smp_processor_id()) in smp_send_stop()
475 pcpu = per_cpu_ptr(&pcpu_devices, cpu); in smp_send_stop()
513 int cpu; in arch_send_call_function_ipi_mask() local
515 for_each_cpu(cpu, mask) in arch_send_call_function_ipi_mask()
516 pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single); in arch_send_call_function_ipi_mask()
519 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
521 pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single); in arch_send_call_function_single_ipi()
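
Both cross-call entry points raise the same ec_call_function_single event bit on each target; the mask variant is just the single-CPU kick in a loop. From the generic kernel layer, this is what backs calls like the following (generic API, not s390-specific):

    static void show_cpu(void *unused)
    {
            pr_info("cross call running on cpu %d\n", smp_processor_id());
    }

    static void cross_call_demo(void)
    {
            /* run show_cpu on CPU 1 and wait for it to finish */
            smp_call_function_single(1, show_cpu, NULL, 1);
    }
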
525 * this function sends a 'reschedule' IPI to another CPU.
529 void arch_smp_send_reschedule(int cpu) in arch_smp_send_reschedule() argument
531 pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_schedule); in arch_smp_send_reschedule()
543 int smp_store_status(int cpu) in smp_store_status() argument
549 pcpu = per_cpu_ptr(&pcpu_devices, cpu); in smp_store_status()
550 lc = lowcore_ptr[cpu]; in smp_store_status()
567 * Collect CPU state of the previous, crashed system.
571 * The state for all CPUs except the boot CPU needs to be collected
572 * with sigp stop-and-store-status. The boot CPU state is located in
574 * will copy the boot CPU state from the HSA.
577 * The state for all CPUs except the boot CPU needs to be collected
579 * stored the registers of the boot CPU in the absolute lowcore in the
583 * The state for all CPUs except the boot CPU needs to be collected
585 * stored the registers of the boot CPU in the memory of the old system.
587 * Note that the legacy kdump mode where the old kernel stored the CPU states
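
The repetition above is an artifact of the match filter: the full comment enumerates several dump variants (their one-line case headers contain no "cpu" and were filtered out), and every case shares the same first step. A hedged sketch of that shared step; the boot_cpu name and the iteration are illustrative, since the real code walks the CPUs of the crashed system by their physical addresses:

    for_each_present_cpu(cpu) {
            if (cpu == boot_cpu)    /* boot CPU state comes from HSA/lowcore */
                    continue;
            /* stop the CPU and have the machine store its registers */
            pcpu_sigp_retry(per_cpu_ptr(&pcpu_devices, cpu),
                            SIGP_STOP_AND_STORE_STATUS, 0);
    }
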
651 void smp_cpu_set_polarization(int cpu, int val) in smp_cpu_set_polarization() argument
653 per_cpu(pcpu_devices, cpu).polarization = val; in smp_cpu_set_polarization()
656 int smp_cpu_get_polarization(int cpu) in smp_cpu_get_polarization() argument
658 return per_cpu(pcpu_devices, cpu).polarization; in smp_cpu_get_polarization()
661 void smp_cpu_set_capacity(int cpu, unsigned long val) in smp_cpu_set_capacity() argument
663 per_cpu(pcpu_devices, cpu).capacity = val; in smp_cpu_set_capacity()
666 unsigned long smp_cpu_get_capacity(int cpu) in smp_cpu_get_capacity() argument
668 return per_cpu(pcpu_devices, cpu).capacity; in smp_cpu_get_capacity()
671 void smp_set_core_capacity(int cpu, unsigned long val) in smp_set_core_capacity() argument
675 cpu = smp_get_base_cpu(cpu); in smp_set_core_capacity()
676 for (i = cpu; (i <= cpu + smp_cpu_mtid) && (i < nr_cpu_ids); i++) in smp_set_core_capacity()
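
A worked example of the fan-out at 675/676, assuming smp_cpu_mtid holds the highest thread id within a core (which the loop bound suggests): with smp_cpu_mtid == 1, i.e. two threads per core, a call for cpu 5 is first rebased to cpu 4 by smp_get_base_cpu(), and the capacity is then written for cpus 4 and 5, so all SMT siblings of a core always agree.
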
680 int smp_cpu_get_cpu_address(int cpu) in smp_cpu_get_cpu_address() argument
682 return per_cpu(pcpu_devices, cpu).address; in smp_cpu_get_cpu_address()
710 int cpu, nr, i; in smp_add_core() local
716 cpu = cpumask_first(avail); in smp_add_core()
718 for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) { in smp_add_core()
721 pcpu = per_cpu_ptr(&pcpu_devices, cpu); in smp_add_core()
727 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); in smp_add_core()
728 smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH); in smp_add_core()
729 set_cpu_present(cpu, true); in smp_add_core()
730 if (!early && arch_register_cpu(cpu)) in smp_add_core()
731 set_cpu_present(cpu, false); in smp_add_core()
734 cpumask_clear_cpu(cpu, avail); in smp_add_core()
735 cpu = cpumask_next(cpu, avail); in smp_add_core()
753 * Add IPL core first (which got logical CPU number 0) to make sure in __smp_rescan_cpus()
754 * that all SMT threads get subsequent logical CPU numbers. in __smp_rescan_cpus()
777 unsigned int cpu, mtid, c_cpus, s_cpus; in smp_detect_cpus() local
781 /* Get CPU information */ in smp_detect_cpus()
784 /* Find boot CPU type */ in smp_detect_cpus()
787 for (cpu = 0; cpu < info->combined; cpu++) in smp_detect_cpus()
788 if (info->core[cpu].core_id == address) { in smp_detect_cpus()
789 /* The boot cpu dictates the cpu type. */ in smp_detect_cpus()
790 boot_core_type = info->core[cpu].type; in smp_detect_cpus()
793 if (cpu >= info->combined) in smp_detect_cpus()
794 panic("Could not find boot CPU type"); in smp_detect_cpus()
805 for (cpu = 0; cpu < info->combined; cpu++) { in smp_detect_cpus()
807 info->core[cpu].type != boot_core_type) in smp_detect_cpus()
809 if (cpu < info->configured) in smp_detect_cpus()
824 int cpu = raw_smp_processor_id(); in smp_start_secondary() local
834 rcutree_report_cpu_starting(cpu); in smp_start_secondary()
839 cpumask_set_cpu(cpu, &cpu_setup_mask); in smp_start_secondary()
841 notify_cpu_starting(cpu); in smp_start_secondary()
842 if (topology_cpu_dedicated(cpu)) in smp_start_secondary()
846 set_cpu_online(cpu, true); in smp_start_secondary()
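
The ordering of the fragments in smp_start_secondary() matters; a hedged summary:

    /*
     *      rcutree_report_cpu_starting(cpu)   tell RCU about the new CPU first
     *      cpumask_set_cpu(cpu, &cpu_setup_mask)
     *      notify_cpu_starting(cpu)           run CPU_STARTING notifiers
     *      set_cpu_online(cpu, true)          last step: __cpu_up() below
     *                                         spins until this is visible
     */
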
853 int __cpu_up(unsigned int cpu, struct task_struct *tidle) in __cpu_up() argument
855 struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu); in __cpu_up()
864 rc = pcpu_alloc_lowcore(pcpu, cpu); in __cpu_up()
869 * until new CPU has initialized control registers. in __cpu_up()
872 pcpu_prepare_secondary(pcpu, cpu); in __cpu_up()
873 pcpu_attach_task(cpu, tidle); in __cpu_up()
874 pcpu_start_fn(cpu, smp_start_secondary, NULL); in __cpu_up()
875 /* Wait until cpu puts itself in the online & active maps */ in __cpu_up()
876 while (!cpu_online(cpu)) in __cpu_up()
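
Pulled together, __cpu_up() implies this bringup order (sketch, not verbatim):

    /*
     *      1. pcpu_alloc_lowcore()      allocate and map the CPU's lowcore
     *      2. pcpu_prepare_secondary()  populate lowcore, attach to init_mm
     *      3. pcpu_attach_task()        install the idle task (tidle)
     *      4. pcpu_start_fn()           sigp restart into smp_start_secondary
     *      5. busy-wait on cpu_online(cpu), which the new CPU sets itself
     */
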
894 int cpu; in __cpu_disable() local
898 cpu = smp_processor_id(); in __cpu_disable()
899 set_cpu_online(cpu, false); in __cpu_disable()
900 cpumask_clear_cpu(cpu, &cpu_setup_mask); in __cpu_disable()
902 /* Disable pseudo page faults on this cpu. */ in __cpu_disable()
914 void __cpu_die(unsigned int cpu) in __cpu_die() argument
918 /* Wait until target cpu is down */ in __cpu_die()
919 pcpu = per_cpu_ptr(&pcpu_devices, cpu); in __cpu_die()
922 pcpu_free_lowcore(pcpu, cpu); in __cpu_die()
923 cpumask_clear_cpu(cpu, mm_cpumask(&init_mm)); in __cpu_die()
924 cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask); in __cpu_die()
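
Teardown mirrors the bringup above; what the __cpu_disable()/__cpu_die() fragments imply, as a hedged summary:

    /*
     * __cpu_disable()  (runs on the dying CPU):
     *      set_cpu_online(cpu, false);
     *      cpumask_clear_cpu(cpu, &cpu_setup_mask);
     *      ... disable pseudo page faults ...
     *
     * __cpu_die()      (runs on a surviving CPU):
     *      wait until the target CPU reaches stopped state;
     *      pcpu_free_lowcore(pcpu, cpu);
     *      drop the CPU from init_mm's cpumasks;
     */
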
937 unsigned int possible, sclp_max, cpu; in smp_fill_possible_mask() local
944 for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) in smp_fill_possible_mask()
945 set_cpu_possible(cpu, true); in smp_fill_possible_mask()
1009 int cpu, val, rc, i; in cpu_configure_store() local
1020 cpu = dev->id; in cpu_configure_store()
1021 cpu = smp_get_base_cpu(cpu); in cpu_configure_store()
1023 if (cpu_online(cpu + i)) in cpu_configure_store()
1025 pcpu = per_cpu_ptr(&pcpu_devices, cpu); in cpu_configure_store()
1035 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) in cpu_configure_store()
1037 per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_STANDBY; in cpu_configure_store()
1038 smp_cpu_set_polarization(cpu + i, in cpu_configure_store()
1050 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) in cpu_configure_store()
1052 per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_CONFIGURED; in cpu_configure_store()
1053 smp_cpu_set_polarization(cpu + i, in cpu_configure_store()
1095 static int smp_cpu_online(unsigned int cpu) in smp_cpu_online() argument
1097 struct cpu *c = per_cpu_ptr(&cpu_devices, cpu); in smp_cpu_online()
1102 static int smp_cpu_pre_down(unsigned int cpu) in smp_cpu_pre_down() argument
1104 struct cpu *c = per_cpu_ptr(&cpu_devices, cpu); in smp_cpu_pre_down()
1110 bool arch_cpu_is_hotpluggable(int cpu) in arch_cpu_is_hotpluggable() argument
1112 return !!cpu; in arch_cpu_is_hotpluggable()
1115 int arch_register_cpu(int cpu) in arch_register_cpu() argument
1117 struct cpu *c = per_cpu_ptr(&cpu_devices, cpu); in arch_register_cpu()
1120 c->hotpluggable = arch_cpu_is_hotpluggable(cpu); in arch_register_cpu()
1121 rc = register_cpu(c, cpu); in arch_register_cpu()
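
A small usage note on the hotplug predicate: !!cpu normalizes the logical CPU number to a boolean, so only CPU 0, the IPL CPU, is reported as non-hotpluggable; arch_register_cpu() then stores that verdict in c->hotpluggable before registering the device. Hedged demo:

    static void hotplug_demo(void)
    {
            /* logical CPU 0 (the IPL CPU) can never be unplugged */
            WARN_ON(arch_cpu_is_hotpluggable(0));   /* false */
            WARN_ON(!arch_cpu_is_hotpluggable(3));  /* true for all others */
    }
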