xref: /linux/arch/s390/mm/pgalloc.c (revision c02ce1735b150cf7c3b43790b48e23dcd17c0d46)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

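/*
 * If set via the vm.allocate_pgste sysctl, new processes are created with
 * page tables that include the pgste extension needed to run KVM guests.
 */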
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

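/*
 * Allocate a region or segment table (crst). The table spans
 * 1UL << CRST_ALLOC_ORDER pages, all of which are run through
 * __arch_set_page_dat() before the table is handed out.
 */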
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
	unsigned long *table;

	if (!ptdesc)
		return NULL;
	table = ptdesc_to_virt(ptdesc);
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	pagetable_free(virt_to_ptdesc(table));
}

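/*
 * on_each_cpu() callback for crst_table_upgrade(): CPUs whose active mm
 * is the upgraded mm load the new user ASCE, and every CPU flushes its
 * local TLB.
 */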
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	/* change all active ASCEs to avoid the creation of new TLB entries */
	if (current->active_mm == mm) {
		S390_lowcore.user_asce.val = mm->context.asce;
		local_ctl_load(7, &S390_lowcore.user_asce);
	}
	__tlb_flush_local();
}

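/*
 * Upgrade the page table hierarchy from 3 to 4 or 5 (or from 4 to 5)
 * levels so that the address space can reach 'end'. New top-level tables
 * are allocated first, linked in under mm->page_table_lock, and finally
 * all CPUs running this mm are told to reload the ASCE.
 */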
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with mmap_lock held and there is no
	 * reason to optimize for the case where it is not. However, if
	 * that should ever change, the check below will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}

#ifdef CONFIG_PGSTE

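/*
 * Allocate a full page for a page table with pgste extension: the lower
 * half holds the 256 pte entries (initialized to invalid), the upper half
 * is cleared and holds the pgstes used by KVM.
 */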
struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	u64 *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (ptdesc) {
		table = (u64 *)ptdesc_to_virt(ptdesc);
		__arch_set_page_dat(table, 1);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return ptdesc;
}

void page_table_free_pgste(struct ptdesc *ptdesc)
{
	pagetable_free(ptdesc);
}

#endif /* CONFIG_PGSTE */

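/*
 * Allocate a page table for regular process use: one full page per pte
 * table, with the 256 pte entries set to invalid and the upper half of
 * the page cleared. pagetable_pte_ctor() sets up the split pte lock and
 * the page table accounting.
 */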
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	unsigned long *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	table = ptdesc_to_virt(ptdesc);
	__arch_set_page_dat(table, 1);
	/* pt_list is used by gmap only */
	INIT_LIST_HEAD(&ptdesc->pt_list);
	memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	return table;
}

static void pagetable_pte_dtor_free(struct ptdesc *ptdesc)
{
	pagetable_pte_dtor(ptdesc);
	pagetable_free(ptdesc);
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	pagetable_pte_dtor_free(ptdesc);
}

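/*
 * Called by the generic mmu_gather code once it is safe to free a table:
 * crst tables are recognized by their compound order and freed directly,
 * pte tables additionally go through the pte destructor.
 */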
void __tlb_remove_table(void *table)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(table);
	struct page *page = ptdesc_page(ptdesc);

	if (compound_order(page) == CRST_ALLOC_ORDER) {
		/* pmd, pud, or p4d */
		pagetable_free(ptdesc);
		return;
	}
	pagetable_pte_dtor_free(ptdesc);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
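/* RCU callback for pte_free_defer(): run the pte destructor and free the page. */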
static void pte_free_now(struct rcu_head *head)
{
	struct ptdesc *ptdesc = container_of(head, struct ptdesc, pt_rcu_head);

	pagetable_pte_dtor_free(ptdesc);
}

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);

	call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
	/*
	 * THPs are not allowed for KVM guests. Warn if pgste ever reaches here.
	 * Switch to the generic pte_free_defer() version once gmap is removed.
	 */
	WARN_ON_ONCE(mm_has_pgste(mm));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

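/*
 * Base page tables are bare pte tables without the pgste extension, so
 * they are carved out of a dedicated kmem_cache instead of full pages.
 */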
static unsigned long *base_pgt_alloc(void)
{
	unsigned long *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	return table;
}

static void base_pgt_free(unsigned long *table)
{
	kmem_cache_free(base_pgt_cache, table);
}

static unsigned long *base_crst_alloc(unsigned long val)
{
	unsigned long *table;
	struct ptdesc *ptdesc;

	ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!ptdesc)
		return NULL;
	table = ptdesc_address(ptdesc);
	crst_table_init(table, val);
	return table;
}

static void base_crst_free(unsigned long *table)
{
	pagetable_free(virt_to_ptdesc(table));
}

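/*
 * Generate base_<level>_addr_end() helpers which, analogous to
 * pgd_addr_end() and friends, return the end of the current table
 * entry's range or 'end', whichever comes first.
 */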
#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

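/*
 * LOAD REAL ADDRESS: translate a virtual address through the current
 * DAT tables and return the resulting real address.
 */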
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

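/*
 * The base_*_walk() functions below handle one translation level each.
 * With alloc != 0 they populate the tables for the range [addr, end);
 * with alloc == 0 they free all lower-level tables referenced from that
 * range.
 */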
static int base_page_walk(unsigned long *origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, *table;
	int rc;

	ste = origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = __pa(table) | _SEGMENT_ENTRY;
		}
		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, *table;
	int rc;

	rtte = origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = __pa(table) | _REGION3_ENTRY;
		}
		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, *table;
	int rc;

	rste = origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = __pa(table) | _REGION2_ENTRY;
		}
		table = __va(*rste & _REGION_ENTRY_ORIGIN);
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, *table;
	int rc;

	rfte = origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = __pa(table) | _REGION1_ENTRY;
		}
		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}

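/*
 * Create the kmem_cache for base page tables on first use; a local mutex
 * keeps concurrent first-time callers from racing.
 */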
static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference to a
 * regular kernel asce is that the returned asce does not make use of any
 * enhanced DAT features like e.g. large pages. This is required by some I/O
 * functions that pass an asce, e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. TLB entries that might result from attaching
 *	 the asce to a cpu would not be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, *table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
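
/*
 * Illustrative sketch (not part of this file): a caller that needs a DAT
 * translation without enhanced features would pair base_asce_alloc() and
 * base_asce_free() roughly like this; "buf" and the error handling are
 * hypothetical and only show the intended calling convention.
 *
 *	unsigned long asce;
 *
 *	asce = base_asce_alloc((unsigned long)buf, num_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	... hand asce to the I/O or service call interface ...
 *	base_asce_free(asce);
 */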