// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

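/*
 * Allocate a full CRST (region or segment) table spanning
 * 1UL << CRST_ALLOC_ORDER pages. Allocations for init_mm are not charged
 * to memcg; all others use GFP_KERNEL_ACCOUNT. The backing pages are
 * marked as DAT table pages via __arch_set_page_dat().
 */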
unsigned long *crst_table_alloc_noprof(struct mm_struct *mm)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT;
	struct ptdesc *ptdesc;
	unsigned long *table;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	ptdesc = pagetable_alloc_noprof(gfp, CRST_ALLOC_ORDER);
	if (!ptdesc)
		return NULL;
	table = ptdesc_address(ptdesc);
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	if (!table)
		return;
	pagetable_free(virt_to_ptdesc(table));
}

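/*
 * Called via on_each_cpu() from crst_table_upgrade(): on CPUs whose active
 * mm is the upgraded mm, reload the new asce into the lowcore user_asce and
 * into control register 7 (and control register 1 unless TIF_ASCE_PRIMARY
 * is set). The local TLB is flushed in any case so that no translations
 * based on the old top-level table survive.
 */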
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;
	struct ctlreg asce;

	/* change all active ASCEs to avoid the creation of new TLB entries */
	if (current->active_mm == mm) {
		asce.val = mm->context.asce;
		get_lowcore()->user_asce = asce;
		local_ctl_load(7, &asce);
		if (!test_thread_flag(TIF_ASCE_PRIMARY))
			local_ctl_load(1, &asce);
	}
	__tlb_flush_local();
}

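/*
 * Extend the address space of @mm so that it covers @end: allocate the new
 * top-level table(s) first, install them and update the asce under
 * mm->page_table_lock, and finally make every CPU pick up the new asce via
 * __crst_table_upgrade(). The caller must hold the mmap write lock.
 */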
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	mmap_assert_write_locked(mm);

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
		pagetable_p4d_ctor(virt_to_ptdesc(p4d));
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
		pagetable_pgd_ctor(virt_to_ptdesc(pgd));
	}

	spin_lock_bh(&mm->page_table_lock);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	pagetable_dtor(virt_to_ptdesc(p4d));
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}

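/*
 * Page table allocation for guest mappings (CONFIG_PGSTE): the lower half
 * of the page holds the PTEs, initialized as invalid, while the upper half
 * is cleared and holds the corresponding page status table entries (PGSTEs)
 * used by KVM.
 */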
#ifdef CONFIG_PGSTE

struct ptdesc *page_table_alloc_pgste_noprof(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	u64 *table;

	ptdesc = pagetable_alloc_noprof(GFP_KERNEL_ACCOUNT, 0);
	if (ptdesc) {
		table = (u64 *)ptdesc_address(ptdesc);
		__arch_set_page_dat(table, 1);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return ptdesc;
}

void page_table_free_pgste(struct ptdesc *ptdesc)
{
	pagetable_free(ptdesc);
}

#endif /* CONFIG_PGSTE */

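/*
 * Allocate a page table for @mm. Each page table occupies a full page: the
 * lower half is initialized with invalid PTEs, the upper half is cleared.
 * As with CRST tables, allocations for init_mm are not charged to memcg.
 */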
unsigned long *page_table_alloc_noprof(struct mm_struct *mm)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT;
	struct ptdesc *ptdesc;
	unsigned long *table;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	ptdesc = pagetable_alloc_noprof(gfp, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(mm, ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	table = ptdesc_address(ptdesc);
	__arch_set_page_dat(table, 1);
	memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	return table;
}

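/*
 * Free a page table. Tables backed by reserved pages (e.g. the early kernel
 * page tables) are released via free_reserved_ptdesc() instead of going
 * through the normal destructor path.
 */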
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	if (pagetable_is_reserved(ptdesc))
		return free_reserved_ptdesc(ptdesc);
	pagetable_dtor_free(ptdesc);
}

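/*
 * With THP a page table may be disconnected while lockless walkers (e.g.
 * fast GUP) can still be traversing it. pte_free_defer() therefore
 * postpones the actual freeing until after an RCU grace period.
 */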
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pte_free_now(struct rcu_head *head)
{
	struct ptdesc *ptdesc = container_of(head, struct ptdesc, pt_rcu_head);

	pagetable_dtor_free(ptdesc);
}

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);

	call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

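/*
 * Page tables for base asces are allocated from a dedicated slab cache
 * (see base_pgt_cache_init()) with _PAGE_TABLE_SIZE size and alignment.
 * The entries are initialized as invalid.
 */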
static unsigned long *base_pgt_alloc(void)
{
	unsigned long *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	return table;
}

static void base_pgt_free(unsigned long *table)
{
	kmem_cache_free(base_pgt_cache, table);
}

static unsigned long *base_crst_alloc(unsigned long val)
{
	unsigned long *table;
	struct ptdesc *ptdesc;

	ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!ptdesc)
		return NULL;
	table = ptdesc_address(ptdesc);
	crst_table_init(table, val);
	return table;
}

static void base_crst_free(unsigned long *table)
{
	if (!table)
		return;
	pagetable_free(virt_to_ptdesc(table));
}

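/*
 * The base_*_addr_end() helpers return the smaller of @end and the next
 * boundary of the given table level. The comparison is done on the
 * decremented values so that it stays correct if a boundary wraps around
 * to zero at the top of the address space.
 */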
#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

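/*
 * Use the LOAD REAL ADDRESS instruction to translate a virtual address via
 * the currently attached DAT tables; the result is used to populate the
 * base page tables below.
 */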
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

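/*
 * The base_*_walk() functions each handle one table level. With @alloc set
 * they allocate and populate the lower-level tables and fill the page table
 * entries with the real addresses returned by base_lra(); with @alloc
 * cleared they free the lower-level tables again.
 */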
static int base_page_walk(unsigned long *origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = origin;
	pte += (addr & _PAGE_INDEX) >> PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, *table;
	int rc;

	ste = origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = __pa(table) | _SEGMENT_ENTRY;
		}
		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, *table;
	int rc;

	rtte = origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = __pa(table) | _REGION3_ENTRY;
		}
		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, *table;
	int rc;

	rste = origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = __pa(table) | _REGION2_ENTRY;
		}
		table = __va(*rste & _REGION_ENTRY_ORIGIN);
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, *table;
	int rc;

	rfte = origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = __pa(table) | _REGION1_ENTRY;
		}
		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}

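/*
 * Create the slab cache for base page tables on first use; the mutex
 * serializes concurrent first-time callers.
 */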
static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. Unlike the regular
 * kernel ASCE, the returned asce does not make use of any enhanced DAT
 * features such as large pages. This is required for some I/O functions
 * that pass an asce, e.g. some service call requests.
 *
 * Note: the returned asce must NEVER be attached to any cpu. It may only
 *	 be used for I/O requests. TLB entries that would result from
 *	 attaching the asce to a cpu are never cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, *table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}

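/*
 * Illustrative sketch only (not an in-tree caller): a user of this
 * interface that needs to hand a basic asce for a buffer of @nr_pages
 * pages starting at @buf to some service request could do the following;
 * issue_request() is a hypothetical placeholder for the actual consumer:
 *
 *	unsigned long asce;
 *	int rc;
 *
 *	asce = base_asce_alloc((unsigned long)buf, nr_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	rc = issue_request(asce);
 *	base_asce_free(asce);
 *	return rc;
 */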