xref: /linux/arch/arm64/mm/pgd.c (revision 79790b6818e96c58fe2bffee1b418c16e64e7b80)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * PGD allocation/freeing
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

static struct kmem_cache *pgd_cache __ro_after_init;

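/*
 * Return true when the pgd should be a full page rather than an object
 * from pgd_cache: either PGD_SIZE matches PAGE_SIZE at build time, or
 * the configured top level (l4/l5) is not enabled at runtime.
 */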
static bool pgdir_is_page_size(void)
{
	if (PGD_SIZE == PAGE_SIZE)
		return true;
	if (CONFIG_PGTABLE_LEVELS == 4)
		return !pgtable_l4_enabled();
	if (CONFIG_PGTABLE_LEVELS == 5)
		return !pgtable_l5_enabled();
	return false;
}

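/*
 * Page-sized pgds come straight from the page allocator; smaller ones
 * are allocated from the dedicated pgd_cache.
 */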
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	gfp_t gfp = GFP_PGTABLE_USER;

	if (pgdir_is_page_size())
		return (pgd_t *)__get_free_page(gfp);
	else
		return kmem_cache_alloc(pgd_cache, gfp);
}

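/*
 * Freeing must mirror pgd_alloc(): page-sized pgds go back to the page
 * allocator, cache-backed ones back to pgd_cache.
 */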
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (pgdir_is_page_size())
		free_page((unsigned long)pgd);
	else
		kmem_cache_free(pgd_cache, pgd);
}

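/*
 * Create pgd_cache for the case where the pgd is smaller than a page;
 * the cache enforces PGD_SIZE alignment.
 */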
void __init pgtable_cache_init(void)
{
	if (pgdir_is_page_size())
		return;

#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * With 52-bit physical addresses, the architecture requires the
	 * top-level table to be aligned to at least 64 bytes.
	 */
	BUILD_BUG_ON(PGD_SIZE < 64);
#endif

	/*
	 * Naturally aligned pgds required by the architecture.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
				      SLAB_PANIC, NULL);
}