/*
 * Copyright IBM Corporation, 2013
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */

/*
 * PPC64 THP support for hash based MMUs
 */
#include <linux/mm.h>
#include <asm/machdep.h>
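
/*
 * __hash_page_thp() - hash fault handling for one subpage of a
 * transparent hugepage mapped by the huge PMD at @pmdp.
 *
 * Returns 0 when the HPTE is in place (or when the access should simply
 * be retried), 1 when a Linux page fault must be taken, and -1 on
 * hypervisor failure while inserting the HPTE.
 */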
int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
		    pmd_t *pmdp, unsigned long trap, unsigned long flags,
		    int ssize, unsigned int psize)
{
	unsigned int index, valid;
	unsigned char *hpte_slot_array;
	unsigned long rflags, pa, hidx;
	unsigned long old_pmd, new_pmd;
	int ret, lpsize = MMU_PAGE_16M;
	unsigned long vpn, hash, shift, slot;

	/*
	 * Atomically mark the Linux large page PMD busy and dirty.
	 */
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		old_pmd = pmd_val(pmd);
		/* If the PMD is busy, retry the access */
		if (unlikely(old_pmd & H_PAGE_BUSY))
			return 0;
		/* If the PMD permissions don't match, take a page fault */
		if (unlikely(!check_pte_access(access, old_pmd)))
			return 1;
		/*
		 * Try to lock the PTE: set ACCESSED unconditionally, and
		 * DIRTY too if this was a write access.
		 */
		new_pmd = old_pmd | H_PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_WRITE)
			new_pmd |= _PAGE_DIRTY;
	} while (!pmd_xchg(pmdp, __pmd(old_pmd), __pmd(new_pmd)));
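	/*
	 * H_PAGE_BUSY is now set: until it is cleared at the end of this
	 * function, concurrent hash faults on this PMD back off and retry,
	 * so we have exclusive access to the slot array fetched below.
	 */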

	/*
	 * Make sure this is a THP or devmap entry.
	 */
	if (!(old_pmd & (H_PAGE_THP_HUGE | _PAGE_DEVMAP)))
		return 0;

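	/*
	 * Translate the Linux protection and status bits in new_pmd into
	 * the HPTE protection bits the hash table wants.
	 */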
	rflags = htab_convert_pte_flags(new_pmd, flags);

	/*
	 * THPs are only supported on platforms that can do mixed page size
	 * segments (MPSS), and all such platforms have a coherent icache.
	 * Hence we don't need to do a lazy icache flush
	 * (hash_page_do_lazy_icache()) on a no-execute fault.
	 */

	/*
	 * Find the slot index details for this ea, using the base page size.
	 */
	shift = mmu_psize_defs[psize].shift;
	index = (ea & ~HPAGE_PMD_MASK) >> shift;
	BUG_ON(index >= PTE_FRAG_SIZE);

	vpn = hpt_vpn(ea, vsid, ssize);
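	/*
	 * The slot array lives in the page table deposited for this huge
	 * PMD: one byte per subpage, holding a valid bit plus the hash
	 * slot index (see hpte_valid() and hpte_hash_index()).
	 */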
	hpte_slot_array = get_hpte_slot_array(pmdp);
	if (psize == MMU_PAGE_4K) {
		/*
		 * Invalidate the old HPTE entry if we have it mapped with a
		 * 64K base page size, because demote_segment won't flush
		 * the hash page table entries.
		 */
		if ((old_pmd & H_PAGE_HASHPTE) && !(old_pmd & H_PAGE_COMBO)) {
			flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
					    ssize, flags);
			/*
			 * With THP, we also clear the slot information for
			 * all the 64K hash PTEs mapping the 16MB page; they
			 * are all invalid now. This makes sure we don't find
			 * a slot valid when we fault with the 4K base page
			 * size.
			 */
			memset(hpte_slot_array, 0, PTE_FRAG_SIZE);
		}
	}

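	/*
	 * If this subpage already has a valid HPTE, try to just update its
	 * protection bits in place rather than inserting a new entry.
	 */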
	valid = hpte_valid(hpte_slot_array, index);
	if (valid) {
		/* update the hpte bits */
		hash = hpt_hash(vpn, shift, ssize);
		hidx = hpte_hash_index(hpte_slot_array, index);
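		/*
		 * The stored index encodes both the bucket (primary vs
		 * secondary hash) and the slot within that bucket's
		 * HPTES_PER_GROUP entries.
		 */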
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		ret = mmu_hash_ops.hpte_updatepp(slot, rflags, vpn,
						 psize, lpsize, ssize, flags);
		/*
		 * We failed to update, try to insert a new entry.
		 */
		if (ret == -1) {
			/*
			 * The large PTE is marked busy, so we can be sure
			 * nobody else is looking at hpte_slot_array; hence
			 * we can safely update it here.
			 */
			valid = 0;
			hpte_slot_array[index] = 0;
		}
	}

	if (!valid) {
		unsigned long hpte_group;

		hash = hpt_hash(vpn, shift, ssize);
		/* insert new entry */
		pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
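		/*
		 * H_PAGE_HASHPTE records that HPTEs exist behind this PMD,
		 * so later invalidations know there is something to flush
		 * from the hash table.
		 */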
		new_pmd |= H_PAGE_HASHPTE;

repeat:
		hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;

		/* Insert into the hash table, primary slot */
		slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
						psize, lpsize, ssize);
		/*
		 * The primary group is full, try the secondary.
		 */
		if (unlikely(slot == -1)) {
			hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
							rflags,
							HPTE_V_SECONDARY,
							psize, lpsize, ssize);
			if (slot == -1) {
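				/*
				 * Both groups are full: pick primary or
				 * secondary pseudo-randomly from the
				 * timebase low bit, evict an entry from
				 * that group and retry the insert.
				 */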
				if (mftb() & 0x1)
					hpte_group = (hash & htab_hash_mask) *
							HPTES_PER_GROUP;

				mmu_hash_ops.hpte_remove(hpte_group);
				goto repeat;
			}
		}
		/*
		 * Hypervisor failure: restore the old PMD and return -1,
		 * similar to __hash_page_*().
		 */
		if (unlikely(slot == -2)) {
			*pmdp = __pmd(old_pmd);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   psize, lpsize, old_pmd);
			return -1;
		}
		/*
		 * The large PTE is marked busy, so we can be sure nobody
		 * else is looking at hpte_slot_array; hence we can safely
		 * update it here.
		 */
		mark_hpte_slot_valid(hpte_slot_array, index, slot);
	}
	/*
	 * Mark the PTE with H_PAGE_COMBO if we are hashing it with the
	 * 4K base page size.
	 */
	if (psize == MMU_PAGE_4K)
		new_pmd |= H_PAGE_COMBO;
	/*
	 * The HPTE valid information is stored in the pgtable whose address
	 * is in the second half of the PMD. Order that update against the
	 * clearing of the busy bit in the huge PMD.
	 */
	smp_wmb();
	*pmdp = __pmd(new_pmd & ~H_PAGE_BUSY);
	return 0;
}