xref: /linux/arch/csky/include/asm/pgalloc.h (revision 1f2367a39f17bd553a75e179a747f9b257bc9478)
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_PGALLOC_H
#define __ASM_CSKY_PGALLOC_H

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/sched.h>
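
/*
 * Populate a pmd entry with a pte table: the pmd simply holds the
 * physical address of the pte page.  pmd_populate_kernel() takes the
 * kernel virtual address of the table, pmd_populate() takes its
 * struct page, and pmd_pgtable() maps a populated pmd back to that
 * struct page.
 */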
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
					pte_t *pte)
{
	set_pmd(pmd, __pmd(__pa(pte)));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
					pgtable_t pte)
{
	set_pmd(pmd, __pmd(__pa(page_address(pte))));
}

#define pmd_pgtable(pmd) pmd_page(pmd)

extern void pgd_init(unsigned long *p);
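
/*
 * Allocate a pte table for kernel mappings.  Every entry is pre-set to
 * _PAGE_GLOBAL rather than zero, presumably so that even empty kernel
 * entries keep the global bit on the TLB refill path.  A minimal usage
 * sketch, assuming the generic __pte_alloc_kernel() path in mm/memory.c
 * (pmd is the pmd_t * being filled; locking omitted):
 *
 *	pte_t *new = pte_alloc_one_kernel(&init_mm);
 *
 *	if (new && pmd_none(*pmd))
 *		pmd_populate_kernel(&init_mm, pmd, new);
 *	else if (new)
 *		pte_free_kernel(&init_mm, new);
 */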
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte;
	unsigned long i;

	pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (!pte)
		return NULL;

	for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
		(pte + i)->pte_low = _PAGE_GLOBAL;

	return pte;
}
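
/*
 * Allocate a pte table for user mappings.  The page is zeroed and
 * pgtable_page_ctor() sets up the split page-table lock and page-table
 * accounting for it.  A minimal usage sketch, assuming the generic
 * __pte_alloc() path in mm/memory.c (pmd is the pmd_t * being filled;
 * locking omitted):
 *
 *	pgtable_t new = pte_alloc_one(mm);
 *
 *	if (new && pmd_none(*pmd))
 *		pmd_populate(mm, pmd, new);
 *	else if (new)
 *		pte_free(mm, new);
 */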
static inline struct page *pte_alloc_one(struct mm_struct *mm)
{
	struct page *pte;

	pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
	if (!pte)
		return NULL;

	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}

	return pte;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_pages((unsigned long)pte, PTE_ORDER);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_pages(pte, PTE_ORDER);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ORDER);
}
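
/*
 * Allocate a fresh pgd: the table is first initialised by pgd_init(),
 * then the kernel entries (from USER_PTRS_PER_PGD up) are copied from
 * init_mm's pgd so that every new mm shares the kernel mappings.  A
 * minimal usage sketch, assuming the mm_alloc_pgd() caller in
 * kernel/fork.c:
 *
 *	mm->pgd = pgd_alloc(mm);
 *	if (!mm->pgd)
 *		return -ENOMEM;
 */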
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret;
	pgd_t *init;

	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (ret) {
		init = pgd_offset(&init_mm, 0UL);
		pgd_init((unsigned long *)ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
		/* prevent out-of-order execution */
		smp_mb();
#ifdef CONFIG_CPU_NEED_TLBSYNC
		dcache_wb_range((unsigned int)ret,
				(unsigned int)(ret + PTRS_PER_PGD));
#endif
	}

	return ret;
}
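
/*
 * Called via pte_free_tlb() from the generic mmu_gather code when page
 * tables are torn down: drop the pte page's ctor state and queue the
 * page so it is only freed after the TLB has been flushed.
 */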
#define __pte_free_tlb(tlb, pte, address)		\
do {							\
	pgtable_page_dtor(pte);				\
	tlb_remove_page(tlb, pte);			\
} while (0)

/* Nothing to trim: csky keeps no page-table caches. */
#define check_pgt_cache()	do {} while (0)

extern void pagetable_init(void);
extern void pre_mmu_init(void);
extern void pre_trap_init(void);

#endif /* __ASM_CSKY_PGALLOC_H */