Lines Matching defs:cpu
13 * The code outside of smp.c uses logical cpu numbers, only smp.c does
14 * the translation of logical to physical cpu ids. All new code that
15 * operates on physical cpu numbers needs to go into smp.c.
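These matches appear to come from the s390 SMP code (arch/s390/kernel/smp.c). The comment above states the central convention: everything outside smp.c works with logical CPU numbers, and only smp.c translates between logical numbers and physical (SIGP) CPU addresses, which is what pcpu_find_address() and smp_find_processor_id() further down do. A minimal userspace sketch of that translation follows; the cpu_address[] table, the helper names and the sample addresses are invented for illustration and are not the kernel's data structures.

#include <stdio.h>

#define NR_CPUS 4

/* One entry per logical CPU, holding an invented physical (SIGP) address. */
static unsigned int cpu_address[NR_CPUS] = { 0x00, 0x01, 0x10, 0x11 };

/* Logical -> physical: the direction code inside "smp.c" needs. */
static unsigned int cpu_to_address(int cpu)
{
	return cpu_address[cpu];
}

/* Physical -> logical: the analogue of smp_find_processor_id() below. */
static int address_to_cpu(unsigned int address)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_address[cpu] == address)
			return cpu;
	return -1;
}

int main(void)
{
	printf("logical 2 -> physical %#x\n", cpu_to_address(2));
	printf("physical 0x11 -> logical %d\n", address_to_cpu(0x11));
	return 0;
}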
18 #define KMSG_COMPONENT "cpu"
33 #include <linux/cpu.h>
164 /* Status stored condition code is equivalent to cpu not running. */
169 * Find struct pcpu by cpu address.
173 int cpu;
175 for_each_cpu(cpu, mask)
176 if (per_cpu(pcpu_devices, cpu).address == address)
177 return &per_cpu(pcpu_devices, cpu);
192 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
208 lc->cpu_nr = cpu;
209 lc->spinlock_lockval = arch_spin_lockval(cpu);
216 if (abs_lowcore_map(cpu, lc, true))
218 lowcore_ptr[cpu] = lc;
232 static void pcpu_free_lowcore(struct pcpu *pcpu, int cpu)
237 lc = lowcore_ptr[cpu];
242 lowcore_ptr[cpu] = NULL;
243 abs_lowcore_unmap(cpu);
251 static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
255 lc = lowcore_ptr[cpu];
256 cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
257 cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
258 lc->cpu_nr = cpu;
261 lc->spinlock_lockval = arch_spin_lockval(cpu);
263 lc->percpu_offset = __per_cpu_offset[cpu];
275 arch_spin_lock_setup(cpu);
278 static void pcpu_attach_task(int cpu, struct task_struct *tsk)
282 lc = lowcore_ptr[cpu];
295 static void pcpu_start_fn(int cpu, void (*func)(void *), void *data)
299 lc = lowcore_ptr[cpu];
304 pcpu_sigp_retry(per_cpu_ptr(&pcpu_devices, cpu), SIGP_RESTART, 0);
310 * Call function via PSW restart on pcpu and stop the current cpu.
317 static void pcpu_delegate(struct pcpu *pcpu, int cpu,
324 lc = lowcore_ptr[cpu];
331 /* Stop target cpu (if func returns this stops the current cpu). */
334 /* Restart func on the target cpu and stop the current cpu. */
349 "0: sigp 0,%0,%2 # sigp restart to target cpu\n"
351 "1: sigp 0,%1,%3 # sigp stop to current cpu\n"
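The pcpu_delegate() matches above show the pattern of handing a function to another CPU via SIGP restart and stopping the CPU that asked: the target is restarted at func, and if func ever returns, the current CPU receives a stop order. A hedged userspace analogue of that control flow, with a thread standing in for the target CPU and all names invented for illustration (build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* The delegated function runs on the "target cpu" and ends the program,
 * the way func takes over after the SIGP restart. */
static void *delegated(void *data)
{
	printf("target cpu runs: %s\n", (const char *)data);
	exit(0);
}

static void delegate(void *(*func)(void *), void *data)
{
	pthread_t target;

	/* "sigp restart": make the target pick up func ... */
	pthread_create(&target, NULL, func, data);
	/* ... "sigp stop": the current cpu never runs anything again. */
	for (;;)
		pause();
}

int main(void)
{
	delegate(delegated, (void *)"delegated work");
	return 0;	/* never reached */
}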
394 int cpu;
396 for_each_present_cpu(cpu)
397 if (per_cpu(pcpu_devices, cpu).address == address)
398 return cpu;
407 bool notrace arch_vcpu_is_preempted(int cpu)
409 if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
411 if (pcpu_running(per_cpu_ptr(&pcpu_devices, cpu)))
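arch_vcpu_is_preempted() reports a virtual CPU as preempted only when it is neither idling in enabled wait nor actually running; callers such as spin-wait loops use this to back off instead of spinning on a lock holder that cannot make progress. A small self-contained model of that shape, with hypothetical stand-ins for the two checks:

#include <stdbool.h>
#include <sched.h>

/* Hypothetical stand-ins for the two conditions checked above. */
static bool cpu_in_enabled_wait(int cpu) { (void)cpu; return false; }
static bool cpu_running(int cpu)         { (void)cpu; return true; }

/* Same shape as arch_vcpu_is_preempted(): a CPU only counts as preempted
 * if it is neither waiting for work nor currently running. */
static bool vcpu_is_preempted(int cpu)
{
	if (cpu_in_enabled_wait(cpu))
		return false;
	if (cpu_running(cpu))
		return false;
	return true;
}

int main(void)
{
	/* A spin-wait loop would yield instead of spinning on a preempted owner. */
	if (vcpu_is_preempted(1))
		sched_yield();
	return 0;
}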
417 void notrace smp_yield_cpu(int cpu)
423 : : "d" (per_cpu(pcpu_devices, cpu).address));
436 int cpu;
443 for_each_cpu(cpu, &cpumask) {
444 struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu);
452 for_each_cpu(cpu, &cpumask)
453 if (pcpu_stopped(per_cpu_ptr(&pcpu_devices, cpu)))
454 cpumask_clear_cpu(cpu, &cpumask);
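The loop above is the emergency-stop pattern: a stop order goes out to every CPU in the mask, and CPUs are then cleared from the mask as they report stopped. A userspace sketch of the same broadcast-then-poll flow, with threads standing in for CPUs and atomic flags for the SIGP stopped state (all names hypothetical, build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 3

static atomic_bool stop_requested[NR_CPUS];
static atomic_bool stopped[NR_CPUS];

/* Each thread models one CPU: it runs until it sees the stop order and
 * then reports itself as stopped. */
static void *cpu_thread(void *arg)
{
	int cpu = (int)(long)arg;

	while (!atomic_load(&stop_requested[cpu]))
		;
	atomic_store(&stopped[cpu], true);
	return NULL;
}

int main(void)
{
	pthread_t tid[NR_CPUS];
	int cpu, pending;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_create(&tid[cpu], NULL, cpu_thread, (void *)(long)cpu);
	/* Send the stop order to every cpu in the "mask" ... */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		atomic_store(&stop_requested[cpu], true);
	/* ... then poll until every cpu has reported stopped. */
	do {
		pending = 0;
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (!atomic_load(&stopped[cpu]))
				pending++;
	} while (pending);
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_join(tid[cpu], NULL);
	printf("all cpus stopped\n");
	return 0;
}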
469 int cpu;
481 for_each_online_cpu(cpu) {
482 if (cpu == smp_processor_id())
484 pcpu = per_cpu_ptr(&pcpu_devices, cpu);
522 int cpu;
524 for_each_cpu(cpu, mask)
525 pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single);
528 void arch_send_call_function_single_ipi(int cpu)
530 pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single);
538 void arch_smp_send_reschedule(int cpu)
540 pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_schedule);
552 int smp_store_status(int cpu)
558 pcpu = per_cpu_ptr(&pcpu_devices, cpu);
559 lc = lowcore_ptr[cpu];
660 void smp_cpu_set_polarization(int cpu, int val)
662 per_cpu(pcpu_devices, cpu).polarization = val;
665 int smp_cpu_get_polarization(int cpu)
667 return per_cpu(pcpu_devices, cpu).polarization;
670 void smp_cpu_set_capacity(int cpu, unsigned long val)
672 per_cpu(pcpu_devices, cpu).capacity = val;
675 unsigned long smp_cpu_get_capacity(int cpu)
677 return per_cpu(pcpu_devices, cpu).capacity;
680 void smp_set_core_capacity(int cpu, unsigned long val)
684 cpu = smp_get_base_cpu(cpu);
685 for (i = cpu; (i <= cpu + smp_cpu_mtid) && (i < nr_cpu_ids); i++)
689 int smp_cpu_get_cpu_address(int cpu)
691 return per_cpu(pcpu_devices, cpu).address;
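The accessors above keep per-CPU polarization, capacity and address in the per-CPU pcpu_devices data, and smp_set_core_capacity() applies a value to a whole core by walking from the base CPU across its SMT siblings (smp_cpu_mtid is the highest thread id within a core). A minimal model of that get/set-per-core pattern; the array, the MTID value and the helper names are invented for illustration:

#include <stdio.h>

#define NR_CPUS	8
#define MTID	1	/* highest SMT thread id within a core, like smp_cpu_mtid */

static unsigned long cpu_capacity[NR_CPUS];

static unsigned long get_capacity(int cpu)
{
	return cpu_capacity[cpu];
}

/* Apply a capacity value to the whole core the CPU belongs to,
 * i.e. to the base CPU and all of its SMT siblings. */
static void set_core_capacity(int cpu, unsigned long val)
{
	int base = cpu - (cpu % (MTID + 1));	/* like smp_get_base_cpu() */
	int i;

	for (i = base; i <= base + MTID && i < NR_CPUS; i++)
		cpu_capacity[i] = val;
}

int main(void)
{
	set_core_capacity(5, 1);	/* CPUs 4 and 5 form one core here */
	printf("cpu4=%lu cpu5=%lu cpu6=%lu\n",
	       get_capacity(4), get_capacity(5), get_capacity(6));
	return 0;
}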
719 int cpu, nr, i;
725 cpu = cpumask_first(avail);
727 for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
730 pcpu = per_cpu_ptr(&pcpu_devices, cpu);
736 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
737 smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH);
738 set_cpu_present(cpu, true);
739 if (!early && arch_register_cpu(cpu))
740 set_cpu_present(cpu, false);
743 cpumask_clear_cpu(cpu, avail);
744 cpu = cpumask_next(cpu, avail);
786 unsigned int cpu, mtid, c_cpus, s_cpus;
796 for (cpu = 0; cpu < info->combined; cpu++)
797 if (info->core[cpu].core_id == address) {
798 /* The boot cpu dictates the cpu type. */
799 boot_core_type = info->core[cpu].type;
802 if (cpu >= info->combined)
813 for (cpu = 0; cpu < info->combined; cpu++) {
815 info->core[cpu].type != boot_core_type)
817 if (cpu < info->configured)
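smp_detect_cpus() walks the SCLP core information, lets the boot CPU's core type decide which cores are counted at all, and splits the remaining cores into configured and standby. A small illustrative version of that counting loop; the struct, the sample values, and the per-entry configured flag (standing in for the kernel's "the first info->configured entries are configured" convention) are all made up:

#include <stdio.h>

struct core_info {
	int core_id;
	int type;
	int configured;	/* stand-in for the "first info->configured entries" rule */
};

int main(void)
{
	struct core_info info[] = {
		{ 0x00, 1, 1 }, { 0x01, 1, 1 }, { 0x10, 1, 0 }, { 0x11, 2, 1 },
	};
	int combined = 4, boot_core_type = 1;
	int cpu, c_cpus = 0, s_cpus = 0;

	for (cpu = 0; cpu < combined; cpu++) {
		if (info[cpu].type != boot_core_type)
			continue;	/* only cores of the boot cpu type count */
		if (info[cpu].configured)
			c_cpus++;
		else
			s_cpus++;
	}
	printf("%d configured, %d standby\n", c_cpus, s_cpus);
	return 0;
}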
832 int cpu = raw_smp_processor_id();
842 rcutree_report_cpu_starting(cpu);
847 cpumask_set_cpu(cpu, &cpu_setup_mask);
849 notify_cpu_starting(cpu);
850 if (topology_cpu_dedicated(cpu))
854 set_cpu_online(cpu, true);
861 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
863 struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu);
872 rc = pcpu_alloc_lowcore(pcpu, cpu);
880 pcpu_prepare_secondary(pcpu, cpu);
881 pcpu_attach_task(cpu, tidle);
882 pcpu_start_fn(cpu, smp_start_secondary, NULL);
883 /* Wait until cpu puts itself in the online & active maps */
884 while (!cpu_online(cpu))
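__cpu_up() allocates and prepares the new CPU's lowcore, attaches the idle task, kicks the CPU off with smp_start_secondary(), and then simply waits until the new CPU has put itself into the online map. A hedged sketch of that handshake, with a thread and an atomic flag standing in for the secondary CPU and the online mask (names are illustrative only):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <sched.h>

static atomic_bool online;

/* Stand-in for the freshly started CPU: once its setup is done it marks
 * itself online, like set_cpu_online(cpu, true) in smp_start_secondary(). */
static void *secondary(void *arg)
{
	(void)arg;
	atomic_store(&online, true);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, secondary, NULL);
	/* Wait until the cpu puts itself in the online map. */
	while (!atomic_load(&online))
		sched_yield();
	printf("secondary cpu is online\n");
	pthread_join(tid, NULL);
	return 0;
}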
902 int cpu;
906 cpu = smp_processor_id();
907 set_cpu_online(cpu, false);
908 cpumask_clear_cpu(cpu, &cpu_setup_mask);
910 /* Disable pseudo page faults on this cpu. */
922 void __cpu_die(unsigned int cpu)
926 /* Wait until target cpu is down */
927 pcpu = per_cpu_ptr(&pcpu_devices, cpu);
930 pcpu_free_lowcore(pcpu, cpu);
931 cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
932 cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
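__cpu_die() shows the teardown ordering: the caller first waits until the target CPU is really down and only then frees its lowcore and drops it from the init_mm masks, so nothing is released while the dying CPU might still be using it. A userspace sketch of that wait-then-free ordering, with a heap buffer as a hypothetical stand-in for the lowcore:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <sched.h>

static atomic_bool cpu_is_down;
static char *lowcore;	/* stand-in for the dying CPU's lowcore */

static void *dying_cpu(void *arg)
{
	(void)arg;
	lowcore[0] = 1;			/* still using its per-CPU memory */
	atomic_store(&cpu_is_down, true);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	lowcore = malloc(4096);
	pthread_create(&tid, NULL, dying_cpu, NULL);
	/* Wait until target cpu is down ... */
	while (!atomic_load(&cpu_is_down))
		sched_yield();
	pthread_join(tid, NULL);
	/* ... and only then release its per-CPU resources. */
	free(lowcore);
	printf("cpu torn down\n");
	return 0;
}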
945 unsigned int possible, sclp_max, cpu;
952 for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
953 set_cpu_possible(cpu, true);
1017 int cpu, val, rc, i;
1028 cpu = dev->id;
1029 cpu = smp_get_base_cpu(cpu);
1031 if (cpu_online(cpu + i))
1033 pcpu = per_cpu_ptr(&pcpu_devices, cpu);
1043 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
1045 per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_STANDBY;
1046 smp_cpu_set_polarization(cpu + i,
1058 if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
1060 per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_CONFIGURED;
1061 smp_cpu_set_polarization(cpu + i,
1103 static int smp_cpu_online(unsigned int cpu)
1105 struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);
1110 static int smp_cpu_pre_down(unsigned int cpu)
1112 struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);
1118 bool arch_cpu_is_hotpluggable(int cpu)
1120 return !!cpu;
1123 int arch_register_cpu(int cpu)
1125 struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);
1128 c->hotpluggable = arch_cpu_is_hotpluggable(cpu);
1129 rc = register_cpu(c, cpu);