// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
#include <asm/setup.h>

/*
 * One C-SKY MMU jTLB entry maps a pair of pages, i.e. 1 VPN -> 2 PFNs,
 * so every flush below operates on two-page (TLB_ENTRY_SIZE) units.
 */
#define TLB_ENTRY_SIZE		(PAGE_SIZE * 2)
#define TLB_ENTRY_SIZE_MASK	(PAGE_MASK << 1)
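
/*
 * Illustrative example (assuming 4 KiB pages, so PAGE_SIZE = 0x1000
 * and PAGE_MASK = ~0xfff): TLB_ENTRY_SIZE is then 0x2000 and
 * TLB_ENTRY_SIZE_MASK is ~0x1fff, so a flush range is widened to
 * whole two-page units before flushing:
 *
 *	0x12345 & TLB_ENTRY_SIZE_MASK			-> 0x12000
 *	(0x13001 + TLB_ENTRY_SIZE - 1)
 *		& TLB_ENTRY_SIZE_MASK			-> 0x14000
 */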

void flush_tlb_all(void)
{
	tlb_invalid_all();
}

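/*
 * Flush all entries for one address space.  With hardware TLBI this
 * invalidates only the entries tagged with the mm's ASID; without it
 * the whole TLB has to go.
 */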
void flush_tlb_mm(struct mm_struct *mm)
{
#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	asm volatile(
		"tlbi.asids %0	\n"
		"sync.i		\n"
		:
		: "r" (cpu_asid(mm))
		: "memory");
#else
	tlb_invalid_all();
#endif
}

/*
 * The MMU operation registers can only invalidate TLB entries in the
 * jTLB, so we must also change the ASID field of entryhi to invalidate
 * the I-uTLB and D-uTLB.  When the old and new ASIDs are equal, writing
 * oldpid back would leave entryhi unchanged, so the macro below writes
 * oldpid + 1 first to force an ASID change.
 */
#ifndef CONFIG_CPU_HAS_TLBI
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
	if (oldpid == newpid) \
		write_mmu_entryhi(oldpid + 1); \
	write_mmu_entryhi(oldpid); \
} while (0)
#endif

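/*
 * Flush a range of user addresses belonging to vma's address space.
 * The range is first widened to whole two-page jTLB entries, and each
 * step therefore advances by 2 * PAGE_SIZE.
 */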
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	unsigned long newpid = cpu_asid(vma->vm_mm);

	start &= TLB_ENTRY_SIZE_MASK;
	end   += TLB_ENTRY_SIZE - 1;
	end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	while (start < end) {
		asm volatile(
			"tlbi.vas %0	\n"
			:
			: "r" (start | newpid)
			: "memory");

		start += 2 * PAGE_SIZE;
	}
	asm volatile("sync.i\n");
#else
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		/* Probe the jTLB for this VPN/ASID and drop it if present. */
		write_mmu_entryhi(start | newpid);
		start += 2 * PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}

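/*
 * Flush a range of kernel addresses.  Kernel mappings are not tied to
 * one ASID: the TLBI path uses tlbi.vaas (no ASID match, unlike
 * tlbi.vas above), and the probe path simply reuses whatever ASID is
 * currently in entryhi.
 */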
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= TLB_ENTRY_SIZE_MASK;
	end   += TLB_ENTRY_SIZE - 1;
	end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	while (start < end) {
		asm volatile(
			"tlbi.vaas %0	\n"
			:
			: "r" (start)
			: "memory");

		start += 2 * PAGE_SIZE;
	}
	asm volatile("sync.i\n");
#else
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		write_mmu_entryhi(start | oldpid);
		start += 2 * PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}

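/*
 * Flush the single (two-page) TLB entry covering @addr in vma's
 * address space.
 */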
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	int newpid = cpu_asid(vma->vm_mm);

	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	asm volatile(
		"tlbi.vas %0	\n"
		"sync.i		\n"
		:
		: "r" (addr | newpid)
		: "memory");
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | newpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}

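/*
 * Flush the TLB entry covering a single address: with hardware TLBI
 * this uses tlbi.vaas, so the entry is dropped whatever its ASID.
 * Exported so modules can use it.
 */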
void flush_tlb_one(unsigned long addr)
{
	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	asm volatile(
		"tlbi.vaas %0	\n"
		"sync.i		\n"
		:
		: "r" (addr)
		: "memory");
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | oldpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}
EXPORT_SYMBOL(flush_tlb_one);
199