xref: /linux/arch/sh/mm/cache.c (revision dde5e3ffb770ef2854bbc32c51a365e932919e19)
/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

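/*
 * Write data into a page that may also be mapped in user space, e.g. on
 * behalf of access_process_vm()/ptrace.  When the D-cache has aliases and
 * the page is mapped with a clean cache state, the copy goes through a
 * kmap_coherent() mapping that shares the user address's cache colour, so
 * no stale alias is left behind.  Otherwise the plain kernel mapping is
 * used and the page is flagged PG_dcache_dirty for a later lazy flush.
 * Executable mappings additionally get the affected lines flushed for the
 * I-cache.
 */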
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

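/*
 * Read counterpart of copy_to_user_page(): copy data out of a page that
 * may also be mapped in user space.  If the user mapping could hold the
 * live cache lines, read through a kmap_coherent() mapping of matching
 * colour; otherwise read through the regular kernel mapping and flag the
 * page PG_dcache_dirty so the alias is resolved lazily.
 */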
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}
}

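/*
 * Copy one user page to another, e.g. on a copy-on-write fault.  The
 * source is read through kmap_coherent() when it may still have dirty
 * lines under its user mapping; the destination is written back before
 * being unmapped if its kernel address aliases the user address vaddr.
 */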
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is copied on other CPUs too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

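/*
 * Zero a page that is about to be mapped at vaddr in user space, e.g.
 * for an anonymous zero-fill fault.  If the kernel mapping used for the
 * clear aliases with vaddr, the zeroed lines are written back so the
 * user mapping sees them.
 */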
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_wback_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

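/*
 * Called when a PTE is installed (via update_mmu_cache()).  If the page
 * was left PG_dcache_dirty by an earlier kernel-side write and its kernel
 * address aliases the new user address, write the kernel-side lines back
 * now; this is the lazy half of the D-cache flushing scheme.
 */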
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn) && page_mapping(page)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			unsigned long addr = (unsigned long)page_address(page);

			if (pages_do_alias(addr, address & PAGE_MASK))
				__flush_wback_region((void *)addr, PAGE_SIZE);
		}
	}
}

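/*
 * Flush an anonymous page before the kernel accesses it through its own
 * mapping (e.g. from the get_user_pages() path, via flush_anon_page()).
 * Only needed when the kernel and user addresses alias: if the user
 * mapping may hold the live data, write it back through a coherent
 * mapping of the same colour, otherwise write back the kernel-side lines.
 */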
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			__flush_wback_region((void *)kaddr, PAGE_SIZE);
			kunmap_coherent();
		} else
			__flush_wback_region((void *)addr, PAGE_SIZE);
	}
}