// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cps.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];   /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

#ifndef CONFIG_HOTPLUG_PARALLEL
static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);
#endif

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

struct cpumask __cpu_primary_thread_mask __read_mostly;

unsigned int smp_max_threads __initdata = UINT_MAX;

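/*
 * "nosmt" disables sibling threads entirely; "smt=<n>" caps how many
 * hardware threads per core may be brought up.
 */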
static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	/* Ensure at least one thread is available */
	smp_max_threads = clamp_val(smp_max_threads, 1U, UINT_MAX);
	return 0;
}
early_param("smt", early_smt);

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

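/* Record the sibling (same-core) relationships for @cpu. */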
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpus_are_siblings(cpu, i)) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

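/* Record which CPUs share a physical package with @cpu. */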
static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

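/*
 * The platform's SMP ops, installed once during early boot, e.g.
 * register_smp_ops(&vsmp_smp_ops) from platform setup code.
 */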
const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(const struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

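/*
 * Send an IPI to each CPU in @mask. If a target core may have powered
 * itself down (and so become non-coherent), use the CPC to power it
 * back up so that the interrupt is not lost.
 */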
void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;

	default:
		BUG();
	}

	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			if (cpus_are_siblings(cpu, smp_processor_id()))
				continue;

			core = cpu_core(&cpu_data[cpu]);

			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
				mips_cm_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}

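/* Handlers for the two generic IPIs: reschedule and function call. */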
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

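/* Wire up one IPI virq as a per-CPU IRQ with the given handler. */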
static void smp_ipi_init_one(unsigned int virq, const char *name,
			     irq_handler_t handler)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
	BUG_ON(ret);
}

static unsigned int call_virq, sched_virq;

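/*
 * Reserve a call-function and a reschedule IPI for each CPU in @mask and
 * install their handlers. This may run again as further CPUs appear; the
 * first allocation establishes call_virq & sched_virq.
 */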
int mips_smp_ipi_allocate(const struct cpumask *mask)
{
	int virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms only have a partial DT setup, so if we found an irq
	 * node but no ipidomain, try to search for one that is not in the DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	/*
	 * There are systems which use IPI IRQ domains, but only have one
	 * registered when some runtime condition is met. For example a Malta
	 * kernel may include support for GIC & CPU interrupt controller IPI
	 * IRQ domains, but if run on a system with no GIC & no MT ASE then
	 * neither will be supported or registered.
	 *
	 * This is only a problem if we're actually using multiple CPUs, so
	 * fail loudly in that case. Otherwise simply return, skipping IPI
	 * setup, since we're running with only a single CPU.
	 */
	if (!ipidomain) {
		BUG_ON(num_present_cpus() > 1);
		return 0;
	}

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!call_virq)
		call_virq = virq;

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!sched_virq)
		sched_virq = virq;

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			smp_ipi_init_one(call_virq + cpu, "IPI call",
					 ipi_call_interrupt);
			smp_ipi_init_one(sched_virq + cpu, "IPI resched",
					 ipi_resched_interrupt);
		}
	} else {
		smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
		smp_ipi_init_one(sched_virq, "IPI resched",
				 ipi_resched_interrupt);
	}

	return 0;
}

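/* Tear down the IPIs previously reserved for the CPUs in @mask. */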
int mips_smp_ipi_free(const struct cpumask *mask)
{
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms only have a partial DT setup, so if we found an irq
	 * node but no ipidomain, try to search for one that is not in the DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			free_irq(call_virq + cpu, NULL);
			free_irq(sched_virq + cpu, NULL);
		}
	}
	irq_destroy_ipi(call_virq, mask);
	irq_destroy_ipi(sched_virq, mask);
	return 0;
}

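/* Boot-time IPI setup for all possible CPUs, once irqdomains are up. */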
static int __init mips_smp_ipi_init(void)
{
	if (num_possible_cpus() == 1)
		return 0;

	mips_smp_ipi_allocate(cpu_possible_mask);

	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu = raw_smp_processor_id();

	cpu_probe();
	per_cpu_trap_init(false);
	rcutree_report_cpu_starting(cpu);
	mips_clockevent_init();
	mp_ops->init_secondary();
	cpu_report();
	maar_init();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

#ifdef CONFIG_HOTPLUG_PARALLEL
	cpuhp_ap_sync_alive();
#endif
	calibrate_delay();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

#ifndef CONFIG_HOTPLUG_PARALLEL
	/* Notify boot CPU that we're starting & ready to sync counters */
	complete(&cpu_starting);
#endif

	synchronise_count_slave(cpu);

	/* The CPU is running and counters synchronised, now mark it online */
	set_cpu_online(cpu, true);

	calculate_cpu_foreign_map();

#ifndef CONFIG_HOTPLUG_PARALLEL
	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up
	 */
	complete(&cpu_running);
#endif

	/*
	 * IRQs will be enabled in ->smp_finish(); enabling them too early
	 * is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void __init smp_prepare_boot_cpu(void)
{
	if (mp_ops->prepare_boot_cpu)
		mp_ops->prepare_boot_cpu();
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
}

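/*
 * Bring up secondary CPU @cpu. Without parallel hotplug we wait here for
 * the two completions posted from start_secondary().
 */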
#ifdef CONFIG_HOTPLUG_PARALLEL
int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
{
	return mp_ops->boot_secondary(cpu, tidle);
}
#else
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int err;

	err = mp_ops->boot_secondary(cpu, tidle);
	if (err)
		return err;

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);
	return 0;
}
#endif

#ifdef CONFIG_PROFILING
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#endif

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

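/*
 * Flush all TLB entries on all CPUs. With MMID support a single
 * globalized invalidate (ginvt) avoids the cross-CPU IPIs entirely.
 */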
void flush_tlb_all(void)
{
	if (cpu_has_mmid) {
		htw_stop();
		ginvt_full();
		sync_ginv();
		instruction_hazard();
		htw_start();
		return;
	}

	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	drop_mmu_context((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 * o No return value
 * o collapses to normal function call on UP kernels
 * o collapses to normal function call on systems with a single shared
 *   primary cache.
 */
static inline void smp_on_other_tlbs(void (*func)(void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func)(void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, inter-CPU interrupts have to be sent.
 * Another case where inter-CPU interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	if (!mm)
		return;

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

	preempt_disable();

	if (cpu_has_mmid) {
		/*
		 * No need to worry about other CPUs - the ginvt in
		 * drop_mmu_context() will be globalized.
		 */
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, 0);
		}
	}
	drop_mmu_context(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, mm));
		mtc0_tlbw_hazard();
		/* TLB entries map even/odd page pairs, so step by 2 pages */
		addr = round_down(start, PAGE_SIZE * 2);
		end = round_up(end, PAGE_SIZE * 2);
		do {
			ginvt_va_mmid(addr);
			sync_ginv();
			addr += PAGE_SIZE * 2;
		} while (addr < end);
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
		local_flush_tlb_range(vma, start, end);
	} else {
		unsigned int cpu;
		int exec = vma->vm_flags & VM_EXEC;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_range() will only fully flush icache if
			 * the VMA is executable; otherwise we must invalidate
			 * the ASID without making the mm appear to
			 * has_valid_asid() as if it has been completely unused
			 * by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, !exec);
		}
		local_flush_tlb_range(vma, start, end);
	}
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

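/* Kernel mappings are shared by all CPUs, so flush the range everywhere. */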
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
		mtc0_tlbw_hazard();
		ginvt_va_mmid(page);
		sync_ginv();
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
		   (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
		local_flush_tlb_page(vma, page);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_page() only does partial flushes, so
			 * invalidate the ASID without making the mm appear to
			 * has_valid_asid() as if it has been completely unused
			 * by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				set_cpu_context(cpu, vma->vm_mm, 1);
		}
		local_flush_tlb_page(vma, page);
	}
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{
	if (mp_ops->cleanup_dead_cpu)
		mp_ops->cleanup_dead_cpu(cpu);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static void tick_broadcast_callee(void *info)
{
	tick_receive_broadcast();
}

static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd) =
	CSD_INIT(tick_broadcast_callee, NULL);

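/*
 * Relay a clockevents broadcast to @mask via per-CPU CSDs instead of a
 * dedicated broadcast IPI.
 */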
void tick_broadcast(const struct cpumask *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		smp_call_function_single_async(cpu, csd);
	}
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */