// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

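/*
 * Exposed as /proc/sys/vm/allocate_pgste. When set to 1, page tables are
 * allocated with the extra PGSTE area that KVM requires; the value is
 * clamped to 0..1 via SYSCTL_ZERO/SYSCTL_ONE.
 */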
static const struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

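/*
 * Allocate a CRST table: a 16K (four page, CRST_ALLOC_ORDER) table used
 * for the region and segment levels. The backing pages are marked as DAT
 * table storage via the CMMA page-states interface.
 */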
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
	unsigned long *table;

	if (!ptdesc)
		return NULL;
	table = ptdesc_to_virt(ptdesc);
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	if (!table)
		return;
	pagetable_free(virt_to_ptdesc(table));
}

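/*
 * Executed on each CPU via on_each_cpu(): if the upgraded mm is the
 * active one, load the new ASCE into control register 7, then flush
 * the local TLB.
 */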
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	/* change all active ASCEs to avoid the creation of new TLB entries */
	if (current->active_mm == mm) {
		get_lowcore()->user_asce.val = mm->context.asce;
		local_ctl_load(7, &get_lowcore()->user_asce);
	}
	__tlb_flush_local();
}

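/*
 * Upgrade the page table hierarchy of @mm so that it covers user space up
 * to @end: from three to four, three to five, or four to five levels. The
 * new top-level table is spliced in under mm->page_table_lock, after which
 * every CPU is forced to pick up the new ASCE.
 */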
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
		pagetable_p4d_ctor(virt_to_ptdesc(p4d));
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
		pagetable_pgd_ctor(virt_to_ptdesc(pgd));
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with mmap_lock held and there is no
	 * reason to optimize for the case where it is not. However, if
	 * that should ever change, the check below will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	/* p4d may be NULL when upgrading directly from four to five levels */
	if (p4d) {
		pagetable_dtor(virt_to_ptdesc(p4d));
		crst_table_free(mm, p4d);
	}
err_p4d:
	return -ENOMEM;
}

#ifdef CONFIG_PGSTE

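/*
 * Allocate a full 4K page table for a KVM guest mapping: the lower 2K hold
 * the 256 page table entries, preset to invalid, the upper 2K hold the
 * corresponding PGSTEs, cleared to zero.
 */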
struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	u64 *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (ptdesc) {
		table = (u64 *)ptdesc_to_virt(ptdesc);
		__arch_set_page_dat(table, 1);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return ptdesc;
}

void page_table_free_pgste(struct ptdesc *ptdesc)
{
	pagetable_free(ptdesc);
}

#endif /* CONFIG_PGSTE */

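/*
 * Allocate and initialize a page table for @mm. The lower 2K of the 4K
 * page hold the 256 page table entries, preset to invalid; the upper 2K
 * are cleared and serve as the PGSTE area if the mm uses PGSTEs.
 */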
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	unsigned long *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	table = ptdesc_to_virt(ptdesc);
	__arch_set_page_dat(table, 1);
	/* pt_list is used by gmap only */
	INIT_LIST_HEAD(&ptdesc->pt_list);
	memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	pagetable_dtor_free(ptdesc);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
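/*
 * A page table that has been disconnected from its page table hierarchy
 * may still be scanned by lockless walkers such as the GUP fast path.
 * Freeing it must therefore be deferred until an RCU grace period has
 * elapsed, via the rcu_head embedded in the ptdesc.
 */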
static void pte_free_now(struct rcu_head *head)
{
	struct ptdesc *ptdesc = container_of(head, struct ptdesc, pt_rcu_head);

	pagetable_dtor_free(ptdesc);
}

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);

	call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
	/*
	 * THPs are not allowed for KVM guests. Warn if pgste ever reaches here.
	 * Switch to the generic pte_free_defer() version once gmap is removed.
	 */
	WARN_ON_ONCE(mm_has_pgste(mm));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Base infrastructure required to generate basic ASCEs, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

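/*
 * Base page tables are 2K (_PAGE_TABLE_SIZE) and carry neither PGSTEs nor
 * ptdesc state, so they are allocated from a dedicated slab cache instead
 * of from full pages.
 */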
static unsigned long *base_pgt_alloc(void)
{
	unsigned long *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	return table;
}

static void base_pgt_free(unsigned long *table)
{
	kmem_cache_free(base_pgt_cache, table);
}

static unsigned long *base_crst_alloc(unsigned long val)
{
	unsigned long *table;
	struct ptdesc *ptdesc;

	ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!ptdesc)
		return NULL;
	table = ptdesc_address(ptdesc);
	crst_table_init(table, val);
	return table;
}

static void base_crst_free(unsigned long *table)
{
	if (!table)
		return;
	pagetable_free(virt_to_ptdesc(table));
}

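/*
 * Return the end of the current table range, clamped to @end. The
 * comparison is done on decremented values so that wraparound at the
 * very top of the address space is handled correctly.
 */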
#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

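/*
 * Translate a virtual address to its real address by means of the LRA
 * (Load Real Address) instruction, i.e. by walking the page tables that
 * are currently attached to this CPU.
 */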
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

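/*
 * The base_*_walk() functions below all follow the same pattern: with
 * @alloc set they build the table hierarchy for the range @addr..@end,
 * allocating lower level tables on demand; with @alloc clear they walk
 * an existing hierarchy and free the lower level tables instead.
 */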
static int base_page_walk(unsigned long *origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = origin;
	pte += (addr & _PAGE_INDEX) >> PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, *table;
	int rc;

	ste = origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = __pa(table) | _SEGMENT_ENTRY;
		}
		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, *table;
	int rc;

	rtte = origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = __pa(table) | _REGION3_ENTRY;
		}
		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, *table;
	int rc;

	rste = origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = __pa(table) | _REGION2_ENTRY;
		}
		table = __va(*rste & _REGION_ENTRY_ORIGIN);
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, *table;
	int rc;

	rfte = origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = __pa(table) | _REGION1_ENTRY;
		}
		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free ASCE and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}

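/*
 * Create the slab cache for base page tables on first use. The cache is
 * created at most once; the mutex and the recheck under it make
 * concurrent first-time callers safe.
 */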
static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an ASCE, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. Unlike the regular
 * kernel ASCE, the returned ASCE does not make use of any enhanced DAT
 * features like e.g. large pages. This is required for some I/O functions
 * that pass an ASCE, like e.g. some service call requests.
 *
 * Note: the returned ASCE may NEVER be attached to any CPU. It may only be
 *	 used for I/O requests. TLB entries that might result because the
 *	 ASCE was attached to a CPU won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, *table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
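
/*
 * Sketch of a hypothetical caller; issue_service_call() is illustrative
 * only, not an existing API:
 *
 *	unsigned long asce;
 *	int rc;
 *
 *	asce = base_asce_alloc((unsigned long)buf, nr_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	rc = issue_service_call(asce);
 *	base_asce_free(asce);
 *	return rc;
 */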