xref: /linux/arch/s390/mm/pgalloc.c (revision 40840afa53bed05b990b201d749dfee3bd6e7e42)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

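/*
 * Allocate a CRST (region or segment) table. A CRST table spans
 * 1UL << CRST_ALLOC_ORDER pages (16K) and holds 2048 eight byte entries.
 * __arch_set_page_dat() flags the backing pages as holding DAT
 * translation tables.
 */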
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
	unsigned long *table;

	if (!ptdesc)
		return NULL;
	table = ptdesc_to_virt(ptdesc);
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	if (!table)
		return;
	pagetable_free(virt_to_ptdesc(table));
}

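/*
 * Run on every CPU via on_each_cpu() after an upgrade: if the upgraded mm is
 * active on this CPU, reload the new user ASCE into control register 7 and,
 * unless TIF_ASCE_PRIMARY is set, into control register 1 as well, then
 * flush the local TLB so no translations based on the old top-level table
 * survive.
 */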
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;
	struct ctlreg asce;

	/* change all active ASCEs to avoid the creation of new TLBs */
	if (current->active_mm == mm) {
		asce.val = mm->context.asce;
		get_lowcore()->user_asce = asce;
		local_ctl_load(7, &asce);
		if (!test_thread_flag(TIF_ASCE_PRIMARY))
			local_ctl_load(1, &asce);
	}
	__tlb_flush_local();
}

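/*
 * Upgrade the page table hierarchy of @mm so that addresses up to @end
 * become mappable: add a region-second (p4d level) and/or region-first
 * (pgd level) table on top of the existing tables, switch mm->pgd and the
 * ASCE to the new top level, and make every CPU that runs this mm pick up
 * the new ASCE via __crst_table_upgrade().
 */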
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	mmap_assert_write_locked(mm);

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
		pagetable_p4d_ctor(virt_to_ptdesc(p4d));
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
		pagetable_pgd_ctor(virt_to_ptdesc(pgd));
	}

	spin_lock_bh(&mm->page_table_lock);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	/* p4d was not allocated if the mm was already using four levels */
	if (p4d) {
		pagetable_dtor(virt_to_ptdesc(p4d));
		crst_table_free(mm, p4d);
	}
err_p4d:
	return -ENOMEM;
}

#ifdef CONFIG_PGSTE

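/*
 * Page tables with PGSTEs are used for KVM guest mappings: a 4K page holds
 * the 256 PTEs in its lower half and the matching page status table entries
 * (PGSTEs) in its upper half. Unlike page_table_alloc(), no
 * pagetable_pte_ctor() is run for these tables.
 */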
struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	u64 *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (ptdesc) {
		table = (u64 *)ptdesc_to_virt(ptdesc);
		__arch_set_page_dat(table, 1);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return ptdesc;
}

void page_table_free_pgste(struct ptdesc *ptdesc)
{
	pagetable_free(ptdesc);
}

#endif /* CONFIG_PGSTE */

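/*
 * Allocate a 4K page for a PTE table: the lower half is initialized with
 * invalid PTEs, the upper half (used for PGSTEs if the mm has them, unused
 * otherwise) is cleared. pagetable_pte_ctor() sets up the split page table
 * lock and the page table accounting for the page.
 */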
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	unsigned long *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	table = ptdesc_to_virt(ptdesc);
	__arch_set_page_dat(table, 1);
	memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	pagetable_dtor_free(ptdesc);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
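/*
 * When a THP collapse replaces a PTE table, the table may still be walked
 * by lockless walkers such as GUP-fast. Defer the actual freeing via RCU
 * so that such walkers can never see the memory reused while they are
 * still traversing it.
 */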
static void pte_free_now(struct rcu_head *head)
{
	struct ptdesc *ptdesc = container_of(head, struct ptdesc, pt_rcu_head);

	pagetable_dtor_free(ptdesc);
}

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);

	call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
	/*
	 * THPs are not allowed for KVM guests. Warn if pgste ever reaches here.
	 * Turn to the generic pte_free_defer() version once gmap is removed.
	 */
	WARN_ON_ONCE(mm_has_pgste(mm));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

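/*
 * Base page tables are only _PAGE_TABLE_SIZE (2K) and are used only by the
 * base asce infrastructure below, so they are carved out of a dedicated
 * slab cache (created on demand in base_pgt_cache_init()) instead of
 * pagetable_alloc(). A freshly allocated table contains only invalid
 * entries.
 */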
static unsigned long *base_pgt_alloc(void)
{
	unsigned long *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	return table;
}

static void base_pgt_free(unsigned long *table)
{
	kmem_cache_free(base_pgt_cache, table);
}

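/*
 * CRST tables for the base infrastructure are full 16K allocations like the
 * regular ones, but every entry is initialized with @val (the empty entry
 * value of the table level they are used for) and no constructor is run.
 */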
static unsigned long *base_crst_alloc(unsigned long val)
{
	unsigned long *table;
	struct ptdesc *ptdesc;

	ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!ptdesc)
		return NULL;
	table = ptdesc_address(ptdesc);
	crst_table_init(table, val);
	return table;
}

static void base_crst_free(unsigned long *table)
{
	if (!table)
		return;
	pagetable_free(virt_to_ptdesc(table));
}

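/*
 * Generate base_<level>_addr_end() helpers that round @addr up to the next
 * <level> boundary, clamped to @end. The comparison is done on (x - 1) so
 * that an @end of 0, meaning the very top of the address space, is handled
 * correctly.
 */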
#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

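/*
 * LRA (load real address) translates @address through the currently
 * attached DAT tables and returns the resulting real address. It is used
 * below to copy the kernel mapping, one page at a time, into the base
 * page tables.
 */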
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

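/*
 * Lowest level of the base table walk: fill each PTE with the real address
 * backing the corresponding kernel virtual page. There is nothing to do in
 * the freeing case (!alloc); the page table itself is released by the
 * segment level walker.
 */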
static int base_page_walk(unsigned long *origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = origin;
	pte += (addr & _PAGE_INDEX) >> PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

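/*
 * Walk one segment table for the range [addr, end). With alloc set, page
 * tables are allocated on demand and filled via base_page_walk(); without
 * alloc, the lower level page tables are freed after the walk.
 */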
static int base_segment_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, *table;
	int rc;

	ste = origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = __pa(table) | _SEGMENT_ENTRY;
		}
		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

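/*
 * The region third, second and first walkers below follow the same pattern,
 * each one table level higher: allocate the next lower table on demand when
 * building, descend into it, and release it again when tearing down.
 */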
static int base_region3_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, *table;
	int rc;

	rtte = origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = __pa(table) | _REGION3_ENTRY;
		}
		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, *table;
	int rc;

	rste = origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = __pa(table) | _REGION2_ENTRY;
		}
		table = __va(*rste & _REGION_ENTRY_ORIGIN);
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, *table;
	int rc;

	rfte = origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = __pa(table) | _REGION1_ENTRY;
		}
		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}

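/*
 * Create the slab cache for the 2K base page tables on first use. The local
 * mutex ensures that concurrent first-time callers do not create the cache
 * twice.
 */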
static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features
 * such as large pages. This is required for some I/O functions that pass an
 * asce, such as some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any CPU. It may only be
 *	 used for I/O requests. TLB entries that might result if the asce
 *	 were attached to a CPU will not be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, *table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
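
/*
 * Illustrative (hypothetical) usage sketch of the base asce interface: a
 * caller that needs to hand a DAT translation of a kernel buffer to an I/O
 * or service call interface could pair base_asce_alloc() and
 * base_asce_free() as below. The buffer name and the consuming function are
 * made up for illustration only; the asce must never be attached to a CPU.
 *
 *	unsigned long asce;
 *	int rc;
 *
 *	asce = base_asce_alloc((unsigned long)buf, num_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	rc = issue_request_with_asce(asce);	// hypothetical consumer
 *	base_asce_free(asce);
 */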