/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed) and write permission.
 * Furthermore, we know it always gets set to a "more permissive"
 * setting, which allows most architectures to optimize this.  We
 * return whether the PTE actually changed, which in turn instructs
 * the caller to do things like update_mmu_cache().  This used to be
 * done in the caller, but sparc needs minor faults to force that call
 * on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
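/*
 * PMD-level counterpart of ptep_set_access_flags(): update the access
 * flags and write permission of a huge pmd, and flush the range it
 * maps if the entry actually changed.  Only meaningful when
 * CONFIG_TRANSPARENT_HUGEPAGE is enabled.
 */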
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
	BUG();
	return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
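/*
 * Test and clear the "accessed" bit of a pte, flushing the TLB entry
 * if it was set so that the next access is guaranteed to mark the pte
 * young again.
 */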
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
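/*
 * Huge-pmd counterpart of ptep_clear_flush_young(): test and clear the
 * "accessed" bit of a transparent huge page's pmd, flushing the mapped
 * range if the bit was set.
 */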
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#else
	BUG();
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
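/*
 * Clear a pte and flush its TLB entry if it may have been cached
 * (pte_accessible()), returning the old value so the caller can
 * examine the dirty and accessed bits.
 */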
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	pte_t pte;
	pte = ptep_get_and_clear(vma->vm_mm, address, ptep);
	if (pte_accessible(pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
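/*
 * Clear a huge pmd and flush the range it mapped, returning the old
 * pmd value.
 */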
pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
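/*
 * Mark a huge pmd as splitting and flush the mapped range so that
 * concurrent gup-fast walkers see the splitting bit before the split
 * proceeds.
 */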
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd = pmd_mksplitting(*pmdp);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	/* tlb flush only to serialize against gup-fast */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
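/*
 * Stash a preallocated page table against the mm so it can be reused
 * when the huge pmd is later split back into normal ptes.  Caller must
 * hold mm->page_table_lock.
 */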
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
	mm->pmd_huge_pte = pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
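/*
 * Take back a page table previously stashed with
 * pgtable_trans_huge_deposit().  Caller must hold mm->page_table_lock.
 */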
/* no "address" argument, so this destroys the page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	if (list_empty(&pgtable->lru))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
					      struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
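/*
 * Mark a huge pmd not-present and flush the range it maps; typically
 * used to keep the hardware walker out while the pmd is being changed.
 */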
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif