/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"

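/*
 * Memory type used for userspace mappings that must not be cached
 * (VIVT cache aliases, or write-buffer aliasing).  We start with
 * "uncached but bufferable"; check_writebuffer_bugs() below downgrades
 * this to fully uncached if the write buffer turns out not to be
 * coherent between two mappings of the same physical page.
 */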
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of the VIVT cache aliasing problem - we
 * make the PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
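/*
 * Switch the pte at *ptep to shared_pte_mask's (uncacheable) memory
 * type if it maps a present page, flushing any cache lines for the
 * old mapping first.  Returns nonzero if the pte was present, so
 * that make_coherent() can count live aliases.
 */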
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already set up to
	 * fault (i.e. is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}

#if USE_SPLIT_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the page
 * table lock here.  Otherwise our caller already holds the shared
 * mm->page_table_lock, so we must not take it again.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
	/*
	 * Use the nested version here to indicate that we are already
	 * holding one similar spinlock.
	 */
	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTLOCKS */

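/*
 * Walk the page tables for @address in @vma and, with the pte lock
 * held, apply do_adjust_pte() to the pte mapping @pfn.  Returns what
 * do_adjust_pte() returned, or 0 if no pte is mapped at @address.
 */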
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map(pmd, address);
	do_pte_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	do_pte_unlock(ptl);
	pte_unmap(pte);

	return ret;
}

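/*
 * A new or updated userspace pte maps the page at @pfn.  Search the
 * file's i_mmap prio tree for other shared mappings of the same page
 * within this mm and make their ptes uncacheable; if any live alias
 * was found, the mapping at @addr must be made uncacheable as well.
 */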
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally skip the VMA that we
		 * are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);
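	/*
	 * If the kernel mapping of this page may hold dirty cache
	 * lines (PG_dcache_clean is clear), write them back now and
	 * mark the page clean so userspace sees up-to-date data.
	 */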
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}
#endif	/* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
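/*
 * @p1 and @p2 are two virtual aliases of the same physical word,
 * mapped with the bufferable memory type.  Write 1 via one alias,
 * then 0 via the other; if a read back through the first alias still
 * sees 1, the write buffer failed to keep the aliases coherent.
 */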
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

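/*
 * Map one freshly allocated page at two different virtual addresses
 * with the bufferable memory type and run check_writebuffer() on the
 * pair.  If the test fails, or cannot be run at all, fall back to
 * mapping shared pages fully uncached via shared_pte_mask.
 */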
void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page";
	}

	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		printk("ok\n");
	}
}