xref: /linux/arch/s390/mm/pgalloc.c (revision 46ba4d0bfcc1cafa2336cab03c02124ba0da0552)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

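/*
 * CRST (region and segment) tables hold 2048 eight-byte entries, i.e. 16KB,
 * which is why they are allocated with an order of CRST_ALLOC_ORDER pages
 * and all of those pages are marked as DAT table pages via
 * __arch_set_page_dat().
 */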
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
	unsigned long *table;

	if (!ptdesc)
		return NULL;
	table = ptdesc_to_virt(ptdesc);
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	if (!table)
		return;
	pagetable_free(virt_to_ptdesc(table));
}

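/*
 * Called on each CPU via on_each_cpu(): CPUs currently running with this
 * mm reload the new user ASCE into control register 7, and every CPU
 * flushes its local TLB so that no translations through the old top-level
 * table survive.
 */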
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	/* change all active ASCEs to avoid the creation of new TLB entries */
	if (current->active_mm == mm) {
		get_lowcore()->user_asce.val = mm->context.asce;
		local_ctl_load(7, &get_lowcore()->user_asce);
	}
	__tlb_flush_local();
}

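/*
 * Grow the page table hierarchy so that addresses up to "end" can be
 * mapped: a three level hierarchy (region-third table on top, 4TB limit)
 * is extended by a region-second table (_REGION1_SIZE limit) and, if
 * needed, a region-first table (TASK_SIZE_MAX limit). The new top-level
 * tables are allocated outside of page_table_lock and installed under it.
 */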
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
		pagetable_p4d_ctor(virt_to_ptdesc(p4d));
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
		pagetable_pgd_ctor(virt_to_ptdesc(pgd));
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with the mmap_lock held and there is no
	 * reason to optimize for the case where it is not. However, should
	 * that ever change, the check below will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	/* p4d is NULL when only the 4 to 5 level upgrade was attempted */
	if (p4d) {
		pagetable_dtor(virt_to_ptdesc(p4d));
		crst_table_free(mm, p4d);
	}
err_p4d:
	return -ENOMEM;
}

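/*
 * Usage sketch (for illustration only, not a caller in this file): the s390
 * mmap path checks the current ASCE limit before placing a mapping and
 * upgrades the hierarchy on demand, roughly like this:
 *
 *	if (addr + len > mm->context.asce_limit &&
 *	    addr + len <= TASK_SIZE)
 *		rc = crst_table_upgrade(mm, addr + len);
 */
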
#ifdef CONFIG_PGSTE

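/*
 * With PGSTEs (used by KVM) a page table occupies a full 4K page: the lower
 * half holds the 256 PTEs, the upper half the corresponding page status
 * table entries, which is why the second half is cleared separately below.
 */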
struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	u64 *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (ptdesc) {
		table = (u64 *)ptdesc_to_virt(ptdesc);
		__arch_set_page_dat(table, 1);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return ptdesc;
}

void page_table_free_pgste(struct ptdesc *ptdesc)
{
	pagetable_free(ptdesc);
}

#endif /* CONFIG_PGSTE */

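/*
 * Allocate a 4K page for a process page table: pagetable_pte_ctor() sets up
 * the generic split PTE lock and page table accounting, the 256 PTEs are
 * initialized to invalid and the upper half of the page is cleared.
 */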
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	unsigned long *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	table = ptdesc_to_virt(ptdesc);
	__arch_set_page_dat(table, 1);
	memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	pagetable_dtor_free(ptdesc);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
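/*
 * Deferred freeing: the page table is only released after an RCU grace
 * period, so lockless walkers (e.g. fast GUP) that still hold a pointer to
 * it cannot see the page being reused.
 */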
static void pte_free_now(struct rcu_head *head)
{
	struct ptdesc *ptdesc = container_of(head, struct ptdesc, pt_rcu_head);

	pagetable_dtor_free(ptdesc);
}

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);

	call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
	/*
	 * THPs are not allowed for KVM guests. Warn if pgste ever reaches here.
	 * Turn to the generic pte_free_defer() version once gmap is removed.
	 */
	WARN_ON_ONCE(mm_has_pgste(mm));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long *base_pgt_alloc(void)
{
	unsigned long *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	return table;
}

static void base_pgt_free(unsigned long *table)
{
	kmem_cache_free(base_pgt_cache, table);
}

static unsigned long *base_crst_alloc(unsigned long val)
{
	unsigned long *table;
	struct ptdesc *ptdesc;

	ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!ptdesc)
		return NULL;
	table = ptdesc_address(ptdesc);
	crst_table_init(table, val);
	return table;
}

static void base_crst_free(unsigned long *table)
{
	if (!table)
		return;
	pagetable_free(virt_to_ptdesc(table));
}

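/*
 * Generate base_<level>_addr_end() helpers that return the start of the
 * next SIZE-aligned block, clamped to "end"; the "- 1" comparison keeps the
 * logic correct when "end" wraps to 0 at the top of the address space.
 */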
#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

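/*
 * LRA (load real address) translates the given virtual address through the
 * currently installed DAT tables and returns the resulting real address;
 * the condition code is not checked here.
 */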
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

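/*
 * The base_*_walk() functions below walk one table level each for the range
 * [addr, end): with alloc != 0 they allocate and link in the lower level
 * tables (page tables are filled with real addresses via base_lra()), with
 * alloc == 0 they free the lower level tables again.
 */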
static int base_page_walk(unsigned long *origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = origin;
	pte += (addr & _PAGE_INDEX) >> PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, *table;
	int rc;

	ste = origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = __pa(table) | _SEGMENT_ENTRY;
		}
		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, *table;
	int rc;

	rtte = origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = __pa(table) | _REGION3_ENTRY;
		}
		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, *table;
	int rc;

	rste = origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = __pa(table) | _REGION2_ENTRY;
		}
		table = __va(*rste & _REGION_ENTRY_ORIGIN);
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, *table;
	int rc;

	rfte = origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = __pa(table) | _REGION1_ENTRY;
		}
		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}

static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features
 * such as large pages. This is required for some I/O functions that pass
 * an asce, for example some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. TLB entries that could result from the asce
 *	 being attached to a cpu would not be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, *table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
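
/*
 * Usage sketch (illustration only, not a caller in this file): a driver
 * that needs a basic asce for a contiguous kernel buffer could do
 * something like:
 *
 *	asce = base_asce_alloc((unsigned long) buf, num_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	... pass asce to the I/O or service call interface ...
 *	base_asce_free(asce);
 */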