// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

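/*
 * Allocate a full region/segment (crst) table. Allocations for the kernel
 * address space (init_mm) are not charged to a memory cgroup, so
 * __GFP_ACCOUNT is dropped for them.
 */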
unsigned long *crst_table_alloc_noprof(struct mm_struct *mm)
{
        gfp_t gfp = GFP_KERNEL_ACCOUNT;
        struct ptdesc *ptdesc;
        unsigned long *table;

        if (mm == &init_mm)
                gfp &= ~__GFP_ACCOUNT;
        ptdesc = pagetable_alloc_noprof(gfp, CRST_ALLOC_ORDER);
        if (!ptdesc)
                return NULL;
        table = ptdesc_address(ptdesc);
        __arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
        return table;
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
        if (!table)
                return;
        pagetable_free(virt_to_ptdesc(table));
}

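/*
 * on_each_cpu() callback used after an address space upgrade: CPUs that are
 * currently running on the upgraded mm load the new ASCE into control
 * register 7 (and into control register 1 unless TIF_ASCE_PRIMARY is set),
 * then the local TLB is flushed.
 */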
static void __crst_table_upgrade(void *arg)
{
        struct mm_struct *mm = arg;
        struct ctlreg asce;

        /* change all active ASCEs to avoid the creation of new TLBs */
        if (current->active_mm == mm) {
                asce.val = mm->context.asce;
                get_lowcore()->user_asce = asce;
                local_ctl_load(7, &asce);
                if (!test_thread_flag(TIF_ASCE_PRIMARY))
                        local_ctl_load(1, &asce);
        }
        __tlb_flush_local();
}

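/*
 * Upgrade the page table hierarchy of @mm so that it can map addresses up
 * to @end: insert a region-second table (3 -> 4 levels) and/or a
 * region-first table (4 -> 5 levels) above the current top level table,
 * switch the mm to the new ASCE and make all CPUs running this mm load it.
 */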
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
        unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
        unsigned long asce_limit = mm->context.asce_limit;

        mmap_assert_write_locked(mm);

        /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
        VM_BUG_ON(asce_limit < _REGION2_SIZE);

        if (end <= asce_limit)
                return 0;

        if (asce_limit == _REGION2_SIZE) {
                p4d = crst_table_alloc(mm);
                if (unlikely(!p4d))
                        goto err_p4d;
                crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
                pagetable_p4d_ctor(virt_to_ptdesc(p4d));
        }
        if (end > _REGION1_SIZE) {
                pgd = crst_table_alloc(mm);
                if (unlikely(!pgd))
                        goto err_pgd;
                crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
                pagetable_pgd_ctor(virt_to_ptdesc(pgd));
        }

        spin_lock_bh(&mm->page_table_lock);

        if (p4d) {
                __pgd = (unsigned long *) mm->pgd;
                p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
                mm->pgd = (pgd_t *) p4d;
                mm->context.asce_limit = _REGION1_SIZE;
                mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                        _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
                mm_inc_nr_puds(mm);
        }
        if (pgd) {
                __pgd = (unsigned long *) mm->pgd;
                pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
                mm->pgd = (pgd_t *) pgd;
                mm->context.asce_limit = TASK_SIZE_MAX;
                mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                        _ASCE_USER_BITS | _ASCE_TYPE_REGION1;
        }

        spin_unlock_bh(&mm->page_table_lock);

        on_each_cpu(__crst_table_upgrade, mm, 0);

        return 0;

err_pgd:
        pagetable_dtor(virt_to_ptdesc(p4d));
        crst_table_free(mm, p4d);
err_p4d:
        return -ENOMEM;
}

#ifdef CONFIG_PGSTE

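/*
 * Allocate a page table including the page status table extension (PGSTE),
 * as used for KVM guest mappings: the lower half of the 4K page holds the
 * PTEs (set to invalid), the upper half holds the PGSTEs (cleared).
 */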
struct ptdesc *page_table_alloc_pgste_noprof(struct mm_struct *mm)
{
        struct ptdesc *ptdesc;
        u64 *table;

        ptdesc = pagetable_alloc_noprof(GFP_KERNEL_ACCOUNT, 0);
        if (ptdesc) {
                table = (u64 *)ptdesc_address(ptdesc);
                __arch_set_page_dat(table, 1);
                memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
                memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
        }
        return ptdesc;
}

void page_table_free_pgste(struct ptdesc *ptdesc)
{
        pagetable_free(ptdesc);
}

#endif /* CONFIG_PGSTE */

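/*
 * Allocate a 4K page table page: the lower half holds the PTEs and is
 * initialized to invalid entries, the upper half (which holds the PGSTEs
 * where those are used) is cleared.
 */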
unsigned long *page_table_alloc_noprof(struct mm_struct *mm)
{
        gfp_t gfp = GFP_KERNEL_ACCOUNT;
        struct ptdesc *ptdesc;
        unsigned long *table;

        if (mm == &init_mm)
                gfp &= ~__GFP_ACCOUNT;
        ptdesc = pagetable_alloc_noprof(gfp, 0);
        if (!ptdesc)
                return NULL;
        if (!pagetable_pte_ctor(mm, ptdesc)) {
                pagetable_free(ptdesc);
                return NULL;
        }
        table = ptdesc_address(ptdesc);
        __arch_set_page_dat(table, 1);
        memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
        memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
        return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct ptdesc *ptdesc = virt_to_ptdesc(table);

        pagetable_dtor_free(ptdesc);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
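/*
 * With transparent huge pages a page table that is removed from the tree
 * may still be in use by a concurrent (lockless) page table walker, so the
 * actual freeing is deferred until after an RCU grace period.
 */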
static void pte_free_now(struct rcu_head *head)
{
        struct ptdesc *ptdesc = container_of(head, struct ptdesc, pt_rcu_head);

        pagetable_dtor_free(ptdesc);
}

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
        struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);

        call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

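/*
 * Page tables for base asces come from a dedicated slab cache and are
 * initialized with invalid entries only.
 */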
static unsigned long *base_pgt_alloc(void)
{
        unsigned long *table;

        table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
        if (table)
                memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
        return table;
}

static void base_pgt_free(unsigned long *table)
{
        kmem_cache_free(base_pgt_cache, table);
}

static unsigned long *base_crst_alloc(unsigned long val)
{
        unsigned long *table;
        struct ptdesc *ptdesc;

        ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
        if (!ptdesc)
                return NULL;
        table = ptdesc_address(ptdesc);
        crst_table_init(table, val);
        return table;
}

static void base_crst_free(unsigned long *table)
{
        if (!table)
                return;
        pagetable_free(virt_to_ptdesc(table));
}

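/*
 * Generate base_<level>_addr_end() helpers: return the next SIZE aligned
 * boundary after @addr, limited to @end, for walking one table at a time.
 */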
#define BASE_ADDR_END_FUNC(NAME, SIZE)                                  \
static inline unsigned long base_##NAME##_addr_end(unsigned long addr, \
                                                   unsigned long end)  \
{                                                                       \
        unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);          \
                                                                        \
        return (next - 1) < (end - 1) ? next : end;                    \
}

BASE_ADDR_END_FUNC(page,    PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

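/*
 * Translate a virtual address to its real address with the LOAD REAL
 * ADDRESS (lra) instruction; the result is what gets stored into the base
 * page tables below.
 */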
static inline unsigned long base_lra(unsigned long address)
{
        unsigned long real;

        asm volatile(
                "       lra     %0,0(%1)\n"
                : "=d" (real) : "a" (address) : "cc");
        return real;
}

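/*
 * Fill a base page table for the range @addr..@end with the real addresses
 * of the corresponding kernel pages. When tables are only being freed
 * (!alloc) there is nothing to do at this level.
 */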
static int base_page_walk(unsigned long *origin, unsigned long addr,
                          unsigned long end, int alloc)
{
        unsigned long *pte, next;

        if (!alloc)
                return 0;
        pte = origin;
        pte += (addr & _PAGE_INDEX) >> PAGE_SHIFT;
        do {
                next = base_page_addr_end(addr, end);
                *pte = base_lra(addr);
        } while (pte++, addr = next, addr < end);
        return 0;
}

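/*
 * Walk the segment table for @addr..@end: with @alloc set, missing page
 * tables are allocated and filled; without it, the referenced page tables
 * are freed instead. The region table walks below follow the same pattern
 * one level further up.
 */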
static int base_segment_walk(unsigned long *origin, unsigned long addr,
                             unsigned long end, int alloc)
{
        unsigned long *ste, next, *table;
        int rc;

        ste = origin;
        ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
        do {
                next = base_segment_addr_end(addr, end);
                if (*ste & _SEGMENT_ENTRY_INVALID) {
                        if (!alloc)
                                continue;
                        table = base_pgt_alloc();
                        if (!table)
                                return -ENOMEM;
                        *ste = __pa(table) | _SEGMENT_ENTRY;
                }
                table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
                rc = base_page_walk(table, addr, next, alloc);
                if (rc)
                        return rc;
                if (!alloc)
                        base_pgt_free(table);
                cond_resched();
        } while (ste++, addr = next, addr < end);
        return 0;
}

static int base_region3_walk(unsigned long *origin, unsigned long addr,
                             unsigned long end, int alloc)
{
        unsigned long *rtte, next, *table;
        int rc;

        rtte = origin;
        rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
        do {
                next = base_region3_addr_end(addr, end);
                if (*rtte & _REGION_ENTRY_INVALID) {
                        if (!alloc)
                                continue;
                        table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
                        if (!table)
                                return -ENOMEM;
                        *rtte = __pa(table) | _REGION3_ENTRY;
                }
                table = __va(*rtte & _REGION_ENTRY_ORIGIN);
                rc = base_segment_walk(table, addr, next, alloc);
                if (rc)
                        return rc;
                if (!alloc)
                        base_crst_free(table);
        } while (rtte++, addr = next, addr < end);
        return 0;
}

static int base_region2_walk(unsigned long *origin, unsigned long addr,
                             unsigned long end, int alloc)
{
        unsigned long *rste, next, *table;
        int rc;

        rste = origin;
        rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
        do {
                next = base_region2_addr_end(addr, end);
                if (*rste & _REGION_ENTRY_INVALID) {
                        if (!alloc)
                                continue;
                        table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
                        if (!table)
                                return -ENOMEM;
                        *rste = __pa(table) | _REGION2_ENTRY;
                }
                table = __va(*rste & _REGION_ENTRY_ORIGIN);
                rc = base_region3_walk(table, addr, next, alloc);
                if (rc)
                        return rc;
                if (!alloc)
                        base_crst_free(table);
        } while (rste++, addr = next, addr < end);
        return 0;
}

static int base_region1_walk(unsigned long *origin, unsigned long addr,
                             unsigned long end, int alloc)
{
        unsigned long *rfte, next, *table;
        int rc;

        rfte = origin;
        rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
        do {
                next = base_region1_addr_end(addr, end);
                if (*rfte & _REGION_ENTRY_INVALID) {
                        if (!alloc)
                                continue;
                        table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
                        if (!table)
                                return -ENOMEM;
                        *rfte = __pa(table) | _REGION1_ENTRY;
                }
                table = __va(*rfte & _REGION_ENTRY_ORIGIN);
                rc = base_region2_walk(table, addr, next, alloc);
                if (rc)
                        return rc;
                if (!alloc)
                        base_crst_free(table);
        } while (rfte++, addr = next, addr < end);
        return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
        unsigned long *table = __va(asce & _ASCE_ORIGIN);

        if (!asce)
                return;
        switch (asce & _ASCE_TYPE_MASK) {
        case _ASCE_TYPE_SEGMENT:
                base_segment_walk(table, 0, _REGION3_SIZE, 0);
                break;
        case _ASCE_TYPE_REGION3:
                base_region3_walk(table, 0, _REGION2_SIZE, 0);
                break;
        case _ASCE_TYPE_REGION2:
                base_region2_walk(table, 0, _REGION1_SIZE, 0);
                break;
        case _ASCE_TYPE_REGION1:
                base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
                break;
        }
        base_crst_free(table);
}

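/* Create the slab cache for base page tables on first use. */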
static int base_pgt_cache_init(void)
{
        static DEFINE_MUTEX(base_pgt_cache_mutex);
        unsigned long sz = _PAGE_TABLE_SIZE;

        if (base_pgt_cache)
                return 0;
        mutex_lock(&base_pgt_cache_mutex);
        if (!base_pgt_cache)
                base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
        mutex_unlock(&base_pgt_cache_mutex);
        return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features like
 * e.g. large pages. This is required for some I/O functions that pass an
 * asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *       used for I/O requests. tlb entries that might result because the
 *       asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
        unsigned long asce, *table, end;
        int rc;

        if (base_pgt_cache_init())
                return 0;
        end = addr + num_pages * PAGE_SIZE;
        if (end <= _REGION3_SIZE) {
                table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
                if (!table)
                        return 0;
                rc = base_segment_walk(table, addr, end, 1);
                asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
        } else if (end <= _REGION2_SIZE) {
                table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
                if (!table)
                        return 0;
                rc = base_region3_walk(table, addr, end, 1);
                asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
        } else if (end <= _REGION1_SIZE) {
                table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
                if (!table)
                        return 0;
                rc = base_region2_walk(table, addr, end, 1);
                asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
        } else {
                table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
                if (!table)
                        return 0;
                rc = base_region1_walk(table, addr, end, 1);
                asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
        }
        if (rc) {
                base_asce_free(asce);
                asce = 0;
        }
        return asce;
}