// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq_work.h>
#include <linux/profile.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>
#include <linux/tracepoint.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>

#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/loongson.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/paravirt.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/time.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Representing the threads (siblings) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* Representing the last level cache shared map of each logical CPU */
cpumask_t cpu_llc_shared_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_llc_shared_map);

/* Representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);
/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which llc shared maps can be computed */
static cpumask_t cpu_llc_shared_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

struct secondary_data cpuboot_data;
static DEFINE_PER_CPU(int, cpu_state);

static const char *ipi_types[NR_IPI] __tracepoint_string = {
	[IPI_RESCHEDULE] = "Rescheduling interrupts",
	[IPI_CALL_FUNCTION] = "Function call interrupts",
	[IPI_IRQ_WORK] = "IRQ work interrupts",
	[IPI_CLEAR_VECTOR] = "Clear vector interrupts",
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, cpu).ipi_irqs[i], 10);
		seq_printf(p, " LoongArch %d %s\n", i + 1, ipi_types[i]);
	}
}

static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

static inline void set_cpu_llc_shared_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_llc_shared_setup_map);

	for_each_cpu(i, &cpu_llc_shared_setup_map) {
		if (cpu_to_node(cpu) == cpu_to_node(i)) {
			cpumask_set_cpu(i, &cpu_llc_shared_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_llc_shared_map[i]);
		}
	}
}

static inline void clear_cpu_llc_shared_map(int cpu)
{
	int i;

	for_each_cpu(i, &cpu_llc_shared_setup_map) {
		if (cpu_to_node(cpu) == cpu_to_node(i)) {
			cpumask_clear_cpu(i, &cpu_llc_shared_map[cpu]);
			cpumask_clear_cpu(cpu, &cpu_llc_shared_map[i]);
		}
	}

	cpumask_clear_cpu(cpu, &cpu_llc_shared_setup_map);
}

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	for_each_cpu(i, &cpu_sibling_setup_map) {
		if (cpus_are_siblings(cpu, i)) {
			cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
		}
	}
}

static inline void clear_cpu_sibling_map(int cpu)
{
	int i;

	for_each_cpu(i, &cpu_sibling_setup_map) {
		if (cpus_are_siblings(cpu, i)) {
			cpumask_clear_cpu(i, &cpu_sibling_map[cpu]);
			cpumask_clear_cpu(cpu, &cpu_sibling_map[i]);
		}
	}

	cpumask_clear_cpu(cpu, &cpu_sibling_setup_map);
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask: pick one online CPU per core */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

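	/* Remove each CPU's own siblings so that only foreign cores remain */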
	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

/* Send mailbox buffer via Mail_Send */
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
{
	uint64_t val;

	/* Send high 32 bits */
	val = IOCSR_MBUF_SEND_BLOCKING;
	val |= (IOCSR_MBUF_SEND_BOX_HI(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
	val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
	val |= (data & IOCSR_MBUF_SEND_H32_MASK);
	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);

	/* Send low 32 bits */
	val = IOCSR_MBUF_SEND_BLOCKING;
	val |= (IOCSR_MBUF_SEND_BOX_LO(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
	val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
	val |= (data << IOCSR_MBUF_SEND_BUF_SHIFT);
	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
}

static u32 ipi_read_clear(int cpu)
{
	u32 action;

	/* Load the ipi register to figure out what we're supposed to do */
	action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS);
	/* Clear the ipi register to clear the interrupt */
	iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR);
	wbflush();

	return action;
}

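/* Post an IPI carrying the given action bits to a physical CPU via IOCSR_IPI_SEND */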
static void ipi_write_action(int cpu, u32 action)
{
	uint32_t val;

	val = IOCSR_IPI_SEND_BLOCKING | action;
	val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
	iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
}

static void loongson_send_ipi_single(int cpu, unsigned int action)
{
	ipi_write_action(cpu_logical_map(cpu), (u32)action);
}

static void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		ipi_write_action(cpu_logical_map(i), (u32)action);
}

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void arch_smp_send_reschedule(int cpu)
{
	mp_ops.send_ipi_single(cpu, ACTION_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	mp_ops.send_ipi_single(smp_processor_id(), ACTION_IRQ_WORK);
}
#endif

static irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
{
	unsigned int action;
	unsigned int cpu = smp_processor_id();

	action = ipi_read_clear(cpu_logical_map(cpu));

	if (action & SMP_RESCHEDULE) {
		scheduler_ipi();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++;
	}

	if (action & SMP_CALL_FUNCTION) {
		generic_smp_call_function_interrupt();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
	}

	if (action & SMP_IRQ_WORK) {
		irq_work_run();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_IRQ_WORK]++;
	}

	if (action & SMP_CLEAR_VECTOR) {
		complete_irq_moving();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_CLEAR_VECTOR]++;
	}

	return IRQ_HANDLED;
}

static void loongson_init_ipi(void)
{
	int r, ipi_irq;

	ipi_irq = get_percpu_irq(INT_IPI);
	if (ipi_irq < 0)
		panic("IPI IRQ mapping failed\n");

	irq_set_percpu_devid(ipi_irq);
	r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &irq_stat);
	if (r < 0)
		panic("IPI IRQ request failed\n");
}

struct smp_ops mp_ops = {
	.init_ipi		= loongson_init_ipi,
	.send_ipi_single	= loongson_send_ipi_single,
	.send_ipi_mask		= loongson_send_ipi_mask,
};

static void __init fdt_smp_setup(void)
{
#ifdef CONFIG_OF
	unsigned int cpu, cpuid;
	struct device_node *node = NULL;

	for_each_of_cpu_node(node) {
		if (!of_device_is_available(node))
			continue;

		cpuid = of_get_cpu_hwid(node, 0);
		if (cpuid >= nr_cpu_ids)
			continue;

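		/* The boot CPU is always mapped to logical CPU 0 */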
		if (cpuid == loongson_sysconf.boot_cpu_id)
			cpu = 0;
		else
			cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);

		num_processors++;
		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
		__cpu_number_map[cpuid] = cpu;
		__cpu_logical_map[cpu] = cpuid;

		early_numa_add_cpu(cpuid, 0);
		set_cpuid_to_node(cpuid, 0);
	}

	loongson_sysconf.nr_cpus = num_processors;
	set_bit(0, loongson_sysconf.cores_io_master);
#endif
}

void __init loongson_smp_setup(void)
{
	fdt_smp_setup();

	if (loongson_sysconf.cores_per_package == 0)
		loongson_sysconf.cores_per_package = num_processors;

	cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
	cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;

	pv_ipi_init();
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
	pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
}

void __init loongson_prepare_cpus(unsigned int max_cpus)
{
	int i = 0;

	parse_acpi_topology();
	cpu_data[0].global_id = cpu_logical_map(0);

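	/* Mark all configured CPUs present and zero mailbox 0 on each of them */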
	for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
		set_cpu_present(i, true);
		csr_mail_send(0, __cpu_logical_map[i], 0);
	}

	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}

/*
 * Setup the PC, SP, and TP of a secondary processor and start it running!
 */
void loongson_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned long entry;

	pr_info("Booting CPU#%d...\n", cpu);

	entry = __pa_symbol((unsigned long)&smpboot_entry);
	cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle);
	cpuboot_data.thread_info = (unsigned long)task_thread_info(idle);

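	/* Hand the entry point to the target CPU via mailbox 0, then kick it with a boot IPI */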
	csr_mail_send(entry, cpu_logical_map(cpu), 0);

	loongson_send_ipi_single(cpu, ACTION_BOOT_CPU);
}

/*
 * SMP init and finish on secondary CPUs
 */
void loongson_init_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
			     ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER | ECFGF_SIP0;

	change_csr_ecfg(ECFG0_IM, imask);

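	/* Accept all IPI vectors on this CPU */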
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);

#ifdef CONFIG_NUMA
	numa_add_cpu(cpu);
#endif
	per_cpu(cpu_state, cpu) = CPU_ONLINE;
	cpu_data[cpu].package =
		     cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
	cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core :
		     cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
	cpu_data[cpu].global_id = cpu_logical_map(cpu);
}

void loongson_smp_finish(void)
{
	local_irq_enable();
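	/* Clear the boot mailbox now that this CPU is up */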
	iocsr_write64(0, LOONGARCH_IOCSR_MBUF0);
	pr_info("CPU#%d finished\n", smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU

int loongson_cpu_disable(void)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (io_master(cpu))
		return -EBUSY;

#ifdef CONFIG_NUMA
	numa_remove_cpu(cpu);
#endif
	set_cpu_online(cpu, false);
	clear_cpu_sibling_map(cpu);
	clear_cpu_llc_shared_map(cpu);
	calculate_cpu_foreign_map();
	local_irq_save(flags);
	irq_migrate_all_off_this_cpu();
	clear_csr_ecfg(ECFG0_IM);
	local_irq_restore(flags);
	local_flush_tlb_all();

	return 0;
}

void loongson_cpu_die(unsigned int cpu)
{
	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	mb();
}

static void __noreturn idle_play_dead(void)
{
	register uint64_t addr;
	register void (*init_fn)(void);

	idle_task_exit();
	local_irq_enable();
	set_csr_ecfg(ECFGF_IPI);
	__this_cpu_write(cpu_state, CPU_DEAD);

	__smp_mb();
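	/*
	 * Sleep in the idle instruction until an IPI wakes this CPU,
	 * then check mailbox 0 for a nonzero entry address to jump to.
	 */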
	do {
		__asm__ __volatile__("idle 0\n\t");
		addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
	} while (addr == 0);

	local_irq_disable();
	init_fn = (void *)TO_CACHE(addr);
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);

	init_fn();
	BUG();
}

#ifdef CONFIG_HIBERNATION
static void __noreturn poll_play_dead(void)
{
	register uint64_t addr;
	register void (*init_fn)(void);

	idle_task_exit();
	__this_cpu_write(cpu_state, CPU_DEAD);

	__smp_mb();
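	/*
	 * Busy-poll mailbox 0 for a nonzero entry address; unlike
	 * idle_play_dead(), no IPI is needed to wake this CPU.
	 */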
	do {
		__asm__ __volatile__("nop\n\t");
		addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
	} while (addr == 0);

	init_fn = (void *)TO_CACHE(addr);
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);

	init_fn();
	BUG();
}
#endif

static void (*play_dead)(void) = idle_play_dead;

void __noreturn arch_cpu_idle_dead(void)
{
	play_dead();
	BUG(); /* play_dead() doesn't return */
}

#ifdef CONFIG_HIBERNATION
int hibernate_resume_nonboot_cpu_disable(void)
{
	int ret;

	play_dead = poll_play_dead;
	ret = suspend_disable_secondary_cpus();
	play_dead = idle_play_dead;

	return ret;
}
#endif

#endif

/*
 * Power management
 */
#ifdef CONFIG_PM

static int loongson_ipi_suspend(void)
{
	return 0;
}

static void loongson_ipi_resume(void)
{
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
}

static struct syscore_ops loongson_ipi_syscore_ops = {
	.resume		= loongson_ipi_resume,
	.suspend	= loongson_ipi_suspend,
};

/*
 * Enable boot cpu ipi before enabling nonboot cpus
 * during syscore_resume.
 */
static int __init ipi_pm_init(void)
{
	register_syscore_ops(&loongson_ipi_syscore_ops);
	return 0;
}

core_initcall(ipi_pm_init);
#endif

/* Preload SMP state for boot cpu */
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu, node, rr_node;

	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	set_my_cpu_offset(per_cpu_offset(0));
	numa_add_cpu(0);

	rr_node = first_node(node_online_map);
	for_each_possible_cpu(cpu) {
		node = early_cpu_to_node(cpu);

		/*
		 * The mapping between present cpus and nodes has been
		 * built during MADT and SRAT parsing.
		 *
		 * If possible cpus == present cpus here, early_cpu_to_node
		 * will return a valid node.
		 *
		 * If possible cpus > present cpus here (e.g. some possible
		 * cpus will be added by cpu-hotplug later), for possible but
		 * not present cpus, early_cpu_to_node will return NUMA_NO_NODE,
		 * and we just map them to online nodes in a round-robin way.
		 * Once hotplugged, a correct mapping will be built for them.
		 */
		if (node != NUMA_NO_NODE)
			set_cpu_numa_node(cpu, node);
		else {
			set_cpu_numa_node(cpu, rr_node);
			rr_node = next_node_in(rr_node, node_online_map);
		}
	}

	pv_spinlock_init();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	loongson_prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_llc_shared_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	loongson_boot_secondary(cpu, tidle);

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(5000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);

	return 0;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	sync_counter();
	cpu = raw_smp_processor_id();
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_probe();
	constant_clockevent_init();
	loongson_init_secondary();

	set_cpu_sibling_map(cpu);
	set_cpu_llc_shared_map(cpu);
	set_cpu_core_map(cpu);

	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting */
	complete(&cpu_starting);

	/* The CPU is running, now mark it online */
	set_cpu_online(cpu, true);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up()
	 */
	complete(&cpu_running);

	/*
	 * IRQs will be enabled in loongson_smp_finish(); enabling them
	 * too early is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	loongson_smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static void stop_this_cpu(void *dummy)
{
	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (true);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

#ifdef CONFIG_PROFILING
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#endif

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	if (atomic_read(&mm->mm_users) == 0)
		return;	/* happens as a result of exit_mmap() */

	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		on_each_cpu_mask(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1);
	} else {
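		/*
		 * Only the current task uses this mm: flush locally and
		 * drop the contexts other CPUs cached for it, so a fresh
		 * ASID is allocated if it is ever scheduled there again.
		 */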
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
		local_flush_tlb_mm(mm);
	}

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		on_each_cpu_mask(mm_cpumask(mm), flush_tlb_range_ipi, &fd, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
		local_flush_tlb_range(vma, start, end);
	}
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		on_each_cpu_mask(mm_cpumask(vma->vm_mm), flush_tlb_page_ipi, &fd, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
		}
		local_flush_tlb_page(vma, page);
	}
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	on_each_cpu(flush_tlb_one_ipi, (void *)vaddr, 1);
}
EXPORT_SYMBOL(flush_tlb_one);