xref: /linux/arch/sh/mm/pgtable.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
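/*
 * Slab-backed page table allocation for SH: a dedicated cache backs
 * pgd allocation, and a second cache backs pmds when the configuration
 * uses more than two page table levels.
 */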
#include <linux/mm.h>
#include <linux/slab.h>

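/*
 * Page table allocations may sleep and must come back zeroed;
 * __GFP_REPEAT asks the allocator to retry harder before failing.
 */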
#define PGALLOC_GFP (GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO)

static struct kmem_cache *pgd_cachep;
#if PAGETABLE_LEVELS > 2
static struct kmem_cache *pmd_cachep;
#endif

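/*
 * Cache constructor: copy the kernel entries of swapper_pg_dir into a
 * freshly constructed pgd so every address space shares the kernel
 * portion of the page tables.
 */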
void pgd_ctor(void *x)
{
	pgd_t *pgd = x;

	memcpy(pgd + USER_PTRS_PER_PGD,
	       swapper_pg_dir + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

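/*
 * Create the slab caches behind pgd_alloc() (and pmd_alloc_one() on
 * three-level configurations).  Each object holds PTRS_PER_PGD or
 * PTRS_PER_PMD entries of 1 << PTE_MAGNITUDE bytes, is page aligned,
 * and SLAB_PANIC makes a failure here fatal at boot.
 */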
void pgtable_cache_init(void)
{
	pgd_cachep = kmem_cache_create("pgd_cache",
				       PTRS_PER_PGD * (1<<PTE_MAGNITUDE),
				       PAGE_SIZE, SLAB_PANIC, pgd_ctor);
#if PAGETABLE_LEVELS > 2
	pmd_cachep = kmem_cache_create("pmd_cache",
				       PTRS_PER_PMD * (1<<PTE_MAGNITUDE),
				       PAGE_SIZE, SLAB_PANIC, NULL);
#endif
}

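/* Allocate a pgd for a new mm from the dedicated cache. */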
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(pgd_cachep, PGALLOC_GFP);
}

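/* Return a pgd previously handed out by pgd_alloc(). */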
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(pgd_cachep, pgd);
}

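/*
 * Three-level configurations only: pud_populate() points a pud entry
 * at a pmd, and the pmd pages themselves come from their own cache.
 */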
#if PAGETABLE_LEVELS > 2
void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud((unsigned long)pmd));
}

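/* Allocate a zeroed pmd from the pmd cache. */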
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP);
}

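/* Return a pmd to its cache. */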
void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(pmd_cachep, pmd);
}
#endif /* PAGETABLE_LEVELS > 2 */
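
/*
 * Usage note (core-mm callers, not defined here): pgd_alloc() and
 * pgd_free() are typically reached from kernel/fork.c through
 * mm_alloc_pgd()/mm_free_pgd() when an mm_struct is set up or torn
 * down, and pmd_alloc_one() backs __pmd_alloc() in mm/memory.c on
 * three-level configurations.
 */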