/* arch/powerpc/mm/book3s64/hash_hugepage.c */
/*
 * Copyright IBM Corporation, 2013
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */

/*
 * PPC64 THP support for hash-based MMUs
 */
#include <linux/mm.h>
#include <asm/machdep.h>

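/*
 * Create or update the hash page table entry (HPTE) backing a
 * transparent huge page mapped by @pmdp. On hash MMUs the THP size is
 * 16MB, so the actual page size is always MMU_PAGE_16M, while @psize
 * is the base page size (4K or 64K) used to map it.
 *
 * Returns 0 if the PMD was busy (the caller retries) or once the HPTE
 * is in place, 1 if the access permissions don't match (the caller
 * takes a page fault), and -1 on hypervisor insert failure.
 */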
int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
		    pmd_t *pmdp, unsigned long trap, unsigned long flags,
		    int ssize, unsigned int psize)
{
	unsigned int index, valid;
	unsigned char *hpte_slot_array;
	unsigned long rflags, pa, hidx;
	unsigned long old_pmd, new_pmd;
	int ret, lpsize = MMU_PAGE_16M;
	unsigned long vpn, hash, shift, slot;

	/*
	 * atomically mark the linux large page PMD busy and dirty
	 */
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		old_pmd = pmd_val(pmd);
		/* If PMD busy, retry the access */
		if (unlikely(old_pmd & H_PAGE_BUSY))
			return 0;
		/* If PMD permissions don't match, take page fault */
		if (unlikely(!check_pte_access(access, old_pmd)))
			return 1;
		/*
		 * Try to lock the PTE, add ACCESSED and DIRTY if it was
		 * a write access
		 */
		new_pmd = old_pmd | H_PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_WRITE)
			new_pmd |= _PAGE_DIRTY;
	} while (!pmd_xchg(pmdp, __pmd(old_pmd), __pmd(new_pmd)));

	/*
	 * Make sure this is a THP or devmap entry
	 */
	if (!(old_pmd & (H_PAGE_THP_HUGE | _PAGE_DEVMAP)))
		return 0;

	rflags = htab_convert_pte_flags(new_pmd, flags);

#if 0
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {

		/*
		 * No CPU that supports hugepages lacks no-execute, so we
		 * don't need to worry about that case
		 */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pmd), trap);
	}
#endif
	/*
	 * Find the slot index details for this ea, using base page size.
	 */
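	/*
	 * A 16MB huge page is backed by many base-page-size HPTEs;
	 * index selects which base page within the huge page this ea
	 * falls in (the ea offset within the PMD range, in base-page
	 * units).
	 */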
	shift = mmu_psize_defs[psize].shift;
	index = (ea & ~HPAGE_PMD_MASK) >> shift;
	BUG_ON(index >= PTE_FRAG_SIZE);

	vpn = hpt_vpn(ea, vsid, ssize);
	hpte_slot_array = get_hpte_slot_array(pmdp);
	if (psize == MMU_PAGE_4K) {
		/*
		 * invalidate the old hpte entry if we have that mapped via 64K
		 * base page size. This is because demote_segment won't flush
		 * hash page table entries.
		 */
		if ((old_pmd & H_PAGE_HASHPTE) && !(old_pmd & H_PAGE_COMBO)) {
			flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
					    ssize, flags);
			/*
			 * With THP, we also clear the slot information for
			 * all the 64K hash PTEs mapping the 16MB page. They
			 * are all invalid now. This makes sure we don't find
			 * the slot valid when we fault with 4K base page
			 * size.
			 */
			memset(hpte_slot_array, 0, PTE_FRAG_SIZE);
		}
	}

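	/*
	 * hpte_slot_array tracks, for each base page of the huge page,
	 * whether an HPTE exists and which slot within the hash group
	 * it occupies, so updates and invalidations can find it again.
	 */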
	valid = hpte_valid(hpte_slot_array, index);
	if (valid) {
		/* update the hpte bits */
		hash = hpt_hash(vpn, shift, ssize);
		hidx = hpte_hash_index(hpte_slot_array, index);
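		/*
		 * HPTEs inserted in the secondary group hash to the
		 * bitwise complement of the primary hash value.
		 */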
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		ret = mmu_hash_ops.hpte_updatepp(slot, rflags, vpn,
						 psize, lpsize, ssize, flags);
		/*
		 * We failed to update, try to insert a new entry.
		 */
		if (ret == -1) {
			/*
			 * The large PTE is marked busy, so we can be sure
			 * nobody is looking at hpte_slot_array; hence we can
			 * safely update it here.
			 */
			valid = 0;
			hpte_slot_array[index] = 0;
		}
	}

	if (!valid) {
		unsigned long hpte_group;

		hash = hpt_hash(vpn, shift, ssize);
		/* insert new entry */
		pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
		new_pmd |= H_PAGE_HASHPTE;

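		/*
		 * Try the primary hash group first; if it is full, try
		 * the secondary group. If both are full, evict an entry
		 * from a pseudo-randomly chosen group and retry.
		 */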
repeat:
		hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;

		/* Insert into the hash table, primary slot */
		slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
						psize, lpsize, ssize);
		/*
		 * Primary is full, try the secondary
		 */
		if (unlikely(slot == -1)) {
			hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
							rflags,
							HPTE_V_SECONDARY,
							psize, lpsize, ssize);
			if (slot == -1) {
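				/*
				 * Both groups are full: pick primary or
				 * secondary at random, using the low bit
				 * of the timebase as a cheap random
				 * source, and evict an entry from it.
				 */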
				if (mftb() & 0x1)
					hpte_group = (hash & htab_hash_mask) *
							HPTES_PER_GROUP;

				mmu_hash_ops.hpte_remove(hpte_group);
				goto repeat;
			}
		}
		/*
		 * Hypervisor failure. Restore the old PMD and return -1,
		 * similar to __hash_page_*
		 */
		if (unlikely(slot == -2)) {
			*pmdp = __pmd(old_pmd);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   psize, lpsize, old_pmd);
			return -1;
		}
		/*
		 * The large PTE is marked busy, so we can be sure
		 * nobody is looking at hpte_slot_array; hence we can
		 * safely update it here.
		 */
		mark_hpte_slot_valid(hpte_slot_array, index, slot);
	}
	/*
	 * Mark the PTE with H_PAGE_COMBO if we are hashing it with base
	 * page size 4K.
	 */
	if (psize == MMU_PAGE_4K)
		new_pmd |= H_PAGE_COMBO;
	/*
	 * The HPTE valid bits are stored in the pgtable whose address is
	 * in the second half of the PMD. Order that store against clearing
	 * of the busy bit in the huge PMD.
	 */
	smp_wmb();
	*pmdp = __pmd(new_pmd & ~H_PAGE_BUSY);
	return 0;
}