xref: /linux/arch/s390/mm/pgalloc.c (revision ecae0bd5173b1014f95a14a8dfbe40ec10367dcf)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */
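
/*
 * Illustrative note (not functional code): the sysctl registered above
 * appears as vm.allocate_pgste and can be toggled from userspace, e.g.:
 *
 *	echo 1 > /proc/sys/vm/allocate_pgste
 *
 * When set, new address spaces get full 4KB page tables with PGSTEs,
 * which KVM needs for its guests on s390.
 */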

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);

	if (!ptdesc)
		return NULL;
	arch_set_page_dat(ptdesc_page(ptdesc), CRST_ALLOC_ORDER);
	return (unsigned long *) ptdesc_to_virt(ptdesc);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	pagetable_free(virt_to_ptdesc(table));
}
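
/*
 * Worked numbers for the allocations above (usual s390 geometry, for
 * reference): region and segment ("crst") tables hold 2048 entries of
 * 8 bytes each, i.e. 16KB, hence CRST_ALLOC_ORDER = 2 (1 << 2 = 4 pages).
 */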

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	/* change all active ASCEs to avoid the creation of new TLBs */
	if (current->active_mm == mm) {
		S390_lowcore.user_asce = mm->context.asce;
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	}
	__tlb_flush_local();
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with mmap_lock held and there is no
	 * reason to optimize for the case where it is not. However, if
	 * that ever changes, the check below will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}
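
/*
 * Worked example for crst_table_upgrade() (a sketch of the logic above,
 * not extra functionality): upgrading a 3-level mm (asce_limit equal to
 * _REGION2_SIZE, i.e. a 4TB address space) to 5 levels takes both branches:
 *
 *	before:	mm->pgd is a region-3 table		(3 levels, 4TB)
 *	p4d:	new region-2 table points to old top	(4 levels, 8PB)
 *	pgd:	new region-1 table points to the p4d	(5 levels, TASK_SIZE_MAX)
 *
 * The ASCE is rebuilt from the new top-level table each time, and the
 * on_each_cpu() call to __crst_table_upgrade() reloads control register 7
 * on all CPUs so user-space accesses use the new tables immediately.
 */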

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	return atomic_fetch_xor(bits, v) ^ bits;
}
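
/*
 * Note: atomic_xor_bits() toggles @bits in @v and returns the *new* value;
 * atomic_fetch_xor() returns the old value, so it is xor-ed once more.
 * Worked example with the pgtable tracking byte described further below:
 *
 *	old byte 0x01 (first 2K fragment allocated), bits = 0x11:
 *	returns 0x01 ^ 0x11 = 0x10 (fragment freed, pending removal)
 */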

#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	u64 *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (ptdesc) {
		table = (u64 *)ptdesc_to_virt(ptdesc);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return ptdesc_page(ptdesc);
}

void page_table_free_pgste(struct page *page)
{
	pagetable_free(page_ptdesc(page));
}

#endif /* CONFIG_PGSTE */
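
/*
 * Resulting layout of a 4KB PGSTE page table as initialized above: the
 * first half holds 256 pte_t entries set to _PAGE_INVALID, the second
 * half holds the 256 page status table entries (PGSTEs, used by KVM),
 * cleared to zero:
 *
 *	+------------------+ 0x0000
 *	| 256 PTEs         |
 *	+------------------+ 0x0800
 *	| 256 PGSTEs       |
 *	+------------------+ 0x1000
 */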

/*
 * A 2KB-pgtable is either the upper or the lower half of a normal page.
 * The second half of the page may be unused or used as another
 * 2KB-pgtable.
 *
 * Whenever possible the parent page for a new 2KB-pgtable is picked
 * from the list of partially allocated pages, mm_context_t::pgtable_list.
 * In case the list is empty a new parent page is allocated and added to
 * the list.
 *
 * When a parent page gets fully allocated it contains 2KB-pgtables in both
 * upper and lower halves and is removed from mm_context_t::pgtable_list.
 *
 * When a 2KB-pgtable is freed from a fully allocated parent page, that
 * page turns partially allocated and is added to mm_context_t::pgtable_list.
 *
 * If a 2KB-pgtable is freed from a partially allocated parent page, that
 * page turns unused and gets removed from mm_context_t::pgtable_list.
 * Furthermore, the unused parent page is released.
 *
 * As follows from the above, no unallocated or fully allocated parent
 * pages are contained in mm_context_t::pgtable_list.
 *
 * The upper byte (bits 24-31) of the parent page _refcount is used
 * for tracking contained 2KB-pgtables and has the following format:
 *
 *   PP  AA
 * 01234567    upper byte (bits 24-31) of struct page::_refcount
 *   ||  ||
 *   ||  |+--- upper 2KB-pgtable is allocated
 *   ||  +---- lower 2KB-pgtable is allocated
 *   |+------- upper 2KB-pgtable is pending for removal
 *   +-------- lower 2KB-pgtable is pending for removal
 *
 * (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why
 * using _refcount is possible).
 *
 * When a 2KB-pgtable is allocated, the corresponding AA bit is set to 1.
 * The parent page is either:
 *   - added to mm_context_t::pgtable_list in case the second half of the
 *     parent page is still unallocated;
 *   - removed from mm_context_t::pgtable_list in case both halves of the
 *     parent page are allocated;
 * These operations are protected with mm_context_t::lock.
 *
 * When a 2KB-pgtable is deallocated, the corresponding AA bit is set to 0
 * and the corresponding PP bit is set to 1 in a single atomic operation.
 * Thus, the PP and AA bits corresponding to the same 2KB-pgtable are
 * mutually exclusive and may never both be set to 1!
 * The parent page is either:
 *   - added to mm_context_t::pgtable_list in case the second half of the
 *     parent page is still allocated;
 *   - removed from mm_context_t::pgtable_list in case the second half of
 *     the parent page is unallocated;
 * These operations are protected with mm_context_t::lock.
 *
 * It is important to understand that mm_context_t::lock only protects
 * mm_context_t::pgtable_list and AA bits, but not the parent page itself
 * and PP bits.
 *
 * Releasing the parent page happens whenever the PP bit turns from 1 to 0,
 * while both AA bits and the second PP bit are already unset. Then the
 * parent page does not contain any 2KB-pgtable fragment anymore, and it has
 * also been removed from mm_context_t::pgtable_list. It is therefore safe
 * to release the page.
 *
 * PGSTE memory spaces use full 4KB-pgtables and do not need most of the
 * logic described above. Both AA bits are set to 1 to denote a 4KB-pgtable,
 * the PP bits are never used, and such a page is never added to or removed
 * from mm_context_t::pgtable_list.
 *
 * pte_free_defer() overrides those rules: it takes the page off pgtable_list,
 * and prevents both 2K fragments from being reused. pte_free_defer() has to
 * guarantee that its pgtable cannot be reused before the RCU grace period
 * has elapsed (which page_table_free_rcu() does not actually guarantee).
 * But for simplicity, because page->rcu_head overlays page->lru, and because
 * the RCU callback might not be called before the mm_context_t has been freed,
 * pte_free_defer() in this implementation prevents both fragments from being
 * reused, and delays making the call to RCU until both fragments are freed.
 */
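
/*
 * Worked example of the upper _refcount byte over one parent page's
 * lifetime (a sketch of the rules above; values are the PPAA byte in hex):
 *
 *	0x00  fresh page, no fragment allocated
 *	0x01  first 2K (offset 0) allocated	-> page on pgtable_list
 *	0x03  both halves allocated		-> page off pgtable_list
 *	0x12  first 2K freed (pending)		-> second half still in use
 *	0x02  pending bit cleared		-> page back on pgtable_list
 *	0x20  second 2K freed (pending)		-> page off pgtable_list
 *	0x00  pending bit cleared		-> parent page is released
 */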
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct ptdesc *ptdesc;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			ptdesc = list_first_entry(&mm->context.pgtable_list,
						struct ptdesc, pt_list);
			mask = atomic_read(&ptdesc->_refcount) >> 24;
			/*
			 * The pending removal bits must also be checked.
			 * Failure to do so might lead to an impossible
			 * value (i.e. 0x13 or 0x23) written to _refcount.
			 * Such values violate the assumption that pending and
			 * allocation bits are mutually exclusive, and the rest
			 * of the code derails as a result. That could lead to
			 * a whole bunch of races and corruptions.
			 */
			mask = (mask | (mask >> 4)) & 0x03U;
			if (mask != 0x03U) {
				table = (unsigned long *) ptdesc_to_virt(ptdesc);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&ptdesc->_refcount,
							0x01U << (bit + 24));
				list_del_init(&ptdesc->pt_list);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	arch_set_page_dat(ptdesc_page(ptdesc), 0);
	/* Initialize page table */
	table = (unsigned long *) ptdesc_to_virt(ptdesc);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		INIT_LIST_HEAD(&ptdesc->pt_list);
		atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&ptdesc->_refcount, 0x01U << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&ptdesc->pt_list, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

static void page_table_release_check(struct page *page, void *table,
				     unsigned int half, unsigned int mask)
{
	char msg[128];

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;
	if (!mask && list_empty(&page->lru))
		return;
	snprintf(msg, sizeof(msg),
		 "Invalid pgtable %p release half 0x%02x mask 0x%02x",
		 table, half, mask);
	dump_page(page, msg);
}

static void pte_free_now(struct rcu_head *head)
{
	struct ptdesc *ptdesc;

	ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
	pagetable_pte_dtor(ptdesc);
	pagetable_free(ptdesc);
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	unsigned int mask, bit, half;
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		/*
		 * Mark the page for delayed release. The actual release
		 * will happen outside of the critical section from this
		 * function or from __tlb_remove_table()
		 */
		mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24));
		mask >>= 24;
		if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) {
			/*
			 * Other half is allocated, and neither half has had
			 * its free deferred: add page to head of list, to make
			 * this freed half available for immediate reuse.
			 */
			list_add(&ptdesc->pt_list, &mm->context.pgtable_list);
		} else {
			/* If page is on list, now remove it. */
			list_del_init(&ptdesc->pt_list);
		}
		spin_unlock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&ptdesc->_refcount, 0x10U << (bit + 24));
		mask >>= 24;
		if (mask != 0x00U)
			return;
		half = 0x01U << bit;
	} else {
		half = 0x03U;
		mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
		mask >>= 24;
	}

	page_table_release_check(ptdesc_page(ptdesc), table, half, mask);
	if (folio_test_clear_active(ptdesc_folio(ptdesc)))
		call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
	else
		pte_free_now(&ptdesc->pt_rcu_head);
}
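
/*
 * Example of the fragment index computation above (illustrative only):
 * a 2K fragment that starts at page offset 0x800 yields
 *
 *	bit = 0x800 / (PTRS_PER_PTE * sizeof(pte_t)) = 0x800 / 0x800 = 1
 *
 * so the xor masks become 0x22 (clear AA, set PP) and then 0x20 (clear PP),
 * operating on the second fragment's bits in the PPAA byte.
 */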

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned int bit, mask;
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	mm = tlb->mm;
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) ((unsigned long)table | 0x03U);
		tlb_remove_ptdesc(tlb, table);
		return;
	}
	bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	/*
	 * Mark the page for delayed release. The actual release will happen
	 * outside of the critical section from __tlb_remove_table() or from
	 * page_table_free()
	 */
	mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) {
		/*
		 * Other half is allocated, and neither half has had
		 * its free deferred: add page to end of list, to make
		 * this freed half available for reuse once its pending
		 * bit has been cleared by __tlb_remove_table().
		 */
		list_add_tail(&ptdesc->pt_list, &mm->context.pgtable_list);
	} else {
		/* If page is on list, now remove it. */
		list_del_init(&ptdesc->pt_list);
	}
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) ((unsigned long) table | (0x01U << bit));
	tlb_remove_table(tlb, table);
}

void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 0x03U, half = mask;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	switch (half) {
	case 0x00U:	/* pmd, pud, or p4d */
		pagetable_free(ptdesc);
		return;
	case 0x01U:	/* lower 2K of a 4K page table */
	case 0x02U:	/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&ptdesc->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0x00U)
			return;
		break;
	case 0x03U:	/* 4K page table with pgstes */
		mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
		mask >>= 24;
		break;
	}

	page_table_release_check(ptdesc_page(ptdesc), table, half, mask);
	if (folio_test_clear_active(ptdesc_folio(ptdesc)))
		call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
	else
		pte_free_now(&ptdesc->pt_rcu_head);
}
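
/*
 * The two low bits of the opaque pointer handed to __tlb_remove_table()
 * encode what is being freed; page tables are at least 2K aligned, so
 * those bits are otherwise zero. For example, a 2K fragment tagged 0x01
 * by page_table_free_rcu() arrives as
 *
 *	_table = 0x...1001  ->  half = 0x01, table = 0x...1000
 *
 * An untagged pointer (0x00) is a full crst table (pmd, pud, or p4d),
 * and 0x03 is a 4K page table with PGSTEs.
 */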

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct page *page;

	page = virt_to_page(pgtable);
	SetPageActive(page);
	page_table_free(mm, (unsigned long *)pgtable);
	/*
	 * page_table_free() does not do the pgste gmap_unlink() which
	 * page_table_free_rcu() does: warn us if pgste ever reaches here.
	 */
	WARN_ON_ONCE(mm_has_pgste(mm));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long *base_pgt_alloc(void)
{
	unsigned long *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	return table;
}

static void base_pgt_free(unsigned long *table)
{
	kmem_cache_free(base_pgt_cache, table);
}

static unsigned long *base_crst_alloc(unsigned long val)
{
	unsigned long *table;
	struct ptdesc *ptdesc;

	ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, CRST_ALLOC_ORDER);
	if (!ptdesc)
		return NULL;
	table = ptdesc_address(ptdesc);

	crst_table_init(table, val);
	return table;
}

static void base_crst_free(unsigned long *table)
{
	pagetable_free(virt_to_ptdesc(table));
}

#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)
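
/*
 * For reference, BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE) expands to:
 *
 *	static inline unsigned long base_segment_addr_end(unsigned long addr,
 *							  unsigned long end)
 *	{
 *		unsigned long next = (addr + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
 *
 *		return (next - 1) < (end - 1) ? next : end;
 *	}
 *
 * i.e. the next SIZE-aligned boundary, capped at @end. Comparing next - 1
 * with end - 1 keeps the cap correct when @end wraps to 0 at the top of
 * the address space.
 */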

static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}
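
/*
 * base_lra() wraps the LRA ("load real address") instruction, which
 * translates a virtual address through the DAT tables the CPU currently
 * uses. The walkers below use it to seed the base tables with the real
 * addresses backing the existing kernel mapping.
 */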

static int base_page_walk(unsigned long *origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, *table;
	int rc;

	ste = origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = __pa(table) | _SEGMENT_ENTRY;
		}
		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}
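
/*
 * Note: the walkers serve two purposes, selected by @alloc: with alloc == 1
 * they populate the tables (called from base_asce_alloc()), with alloc == 0
 * they free every lower-level table they visit (called from
 * base_asce_free()). base_page_walk() has nothing to free, which is why it
 * returns early in the !alloc case.
 */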

static int base_region3_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, *table;
	int rc;

	rtte = origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = __pa(table) | _REGION3_ENTRY;
		}
		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, *table;
	int rc;

	rste = origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = __pa(table) | _REGION2_ENTRY;
		}
		table = __va(*rste & _REGION_ENTRY_ORIGIN);
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, *table;
	int rc;

	rfte = origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = __pa(table) | _REGION1_ENTRY;
		}
		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}

static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. Unlike the regular
 * kernel ASCE, the returned asce does not make use of any enhanced DAT
 * features like e.g. large pages. This is required for some I/O functions
 * that pass an asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, *table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
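
/*
 * Typical usage of the pair above (a sketch, not a real caller; actual
 * users are I/O paths that must hand over an asce that does not rely on
 * enhanced DAT features):
 *
 *	asce = base_asce_alloc((unsigned long)buf, num_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	... pass asce to the I/O function ...
 *	base_asce_free(asce);
 */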
756