xref: /linux/arch/arm/kernel/smp_tlb.c (revision f97b12cce6dea51880a6a89d4607c29c70a6a841)
1 /*
2  *  linux/arch/arm/kernel/smp_tlb.c
3  *
4  *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/preempt.h>
11 #include <linux/smp.h>
12 
13 #include <asm/smp_plat.h>
14 #include <asm/tlbflush.h>
15 
/*
 * Invoke @func(@info) on every CPU in @mask: remote CPUs are reached
 * via IPI through smp_call_function_many(); if the calling CPU is
 * itself in @mask, @func is run directly on it afterwards.  @wait is
 * forwarded to smp_call_function_many() to decide whether to wait for
 * the remote handlers to finish.  get_cpu()/put_cpu() pin us to one
 * CPU for the duration so the local-CPU test stays valid.
 */
static void on_each_cpu_mask(void (*func)(void *), void *info, int wait,
	const struct cpumask *mask)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask))
		func(info);

	put_cpu();
}
27 
28 /**********************************************************************/
29 
30 /*
31  * TLB operations
32  */
/*
 * Argument bundle for the ipi_flush_tlb_* IPI handlers below — the IPI
 * machinery passes only a single void * to the remote handler.
 * Only the fields a given handler reads are filled in by its caller.
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;	/* VMA for user-space flushes */
	unsigned long ta_start;		/* single address, or range start */
	unsigned long ta_end;		/* range end (range flushes only) */
};
38 
/* IPI handler: flush the entire TLB on the receiving CPU. */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}
43 
/* IPI handler: flush this CPU's TLB entries for the mm passed in @arg. */
static inline void ipi_flush_tlb_mm(void *arg)
{
	local_flush_tlb_mm((struct mm_struct *)arg);
}
50 
51 static inline void ipi_flush_tlb_page(void *arg)
52 {
53 	struct tlb_args *ta = (struct tlb_args *)arg;
54 
55 	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
56 }
57 
58 static inline void ipi_flush_tlb_kernel_page(void *arg)
59 {
60 	struct tlb_args *ta = (struct tlb_args *)arg;
61 
62 	local_flush_tlb_kernel_page(ta->ta_start);
63 }
64 
65 static inline void ipi_flush_tlb_range(void *arg)
66 {
67 	struct tlb_args *ta = (struct tlb_args *)arg;
68 
69 	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
70 }
71 
72 static inline void ipi_flush_tlb_kernel_range(void *arg)
73 {
74 	struct tlb_args *ta = (struct tlb_args *)arg;
75 
76 	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
77 }
78 
79 void flush_tlb_all(void)
80 {
81 	if (tlb_ops_need_broadcast())
82 		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
83 	else
84 		local_flush_tlb_all();
85 }
86 
87 void flush_tlb_mm(struct mm_struct *mm)
88 {
89 	if (tlb_ops_need_broadcast())
90 		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
91 	else
92 		local_flush_tlb_mm(mm);
93 }
94 
95 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
96 {
97 	if (tlb_ops_need_broadcast()) {
98 		struct tlb_args ta;
99 		ta.ta_vma = vma;
100 		ta.ta_start = uaddr;
101 		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
102 	} else
103 		local_flush_tlb_page(vma, uaddr);
104 }
105 
106 void flush_tlb_kernel_page(unsigned long kaddr)
107 {
108 	if (tlb_ops_need_broadcast()) {
109 		struct tlb_args ta;
110 		ta.ta_start = kaddr;
111 		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
112 	} else
113 		local_flush_tlb_kernel_page(kaddr);
114 }
115 
116 void flush_tlb_range(struct vm_area_struct *vma,
117                      unsigned long start, unsigned long end)
118 {
119 	if (tlb_ops_need_broadcast()) {
120 		struct tlb_args ta;
121 		ta.ta_vma = vma;
122 		ta.ta_start = start;
123 		ta.ta_end = end;
124 		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
125 	} else
126 		local_flush_tlb_range(vma, start, end);
127 }
128 
129 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
130 {
131 	if (tlb_ops_need_broadcast()) {
132 		struct tlb_args ta;
133 		ta.ta_start = start;
134 		ta.ta_end = end;
135 		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
136 	} else
137 		local_flush_tlb_kernel_range(start, end);
138 }
139 
140