xref: /linux/arch/powerpc/include/asm/pgalloc.h (revision fa08661af834875c9bd6f7f0b1b9388dc72a6585)
#ifndef _ASM_POWERPC_PGALLOC_H
#define _ASM_POWERPC_PGALLOC_H
#ifdef __KERNEL__

#include <linux/mm.h>

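/*
 * Free a kernel page-table page.  Kernel PTE pages are plain pages from
 * the page allocator, referenced by their kernel virtual address, so
 * free_page() on that address is all that is needed.
 */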
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

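/*
 * Free a user page-table page.  pgtable_page_dtor() undoes the
 * pgtable_page_ctor() work done at allocation time (page-table page
 * accounting and split page-table lock state) before the page is
 * returned to the allocator.
 */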
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}

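/*
 * pgtable_free_t packs a page-table pointer and a cache number into one
 * word.  Page tables are aligned to at least their own size, so the low
 * bits of the address are free; PGF_CACHENUM_MASK marks the three bits
 * used to carry the cache number.
 */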
typedef struct pgtable_free {
	unsigned long val;
} pgtable_free_t;

#define PGF_CACHENUM_MASK	0x7

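/*
 * Encode a page-table pointer plus its cache number for deferred
 * freeing.  'mask' is the table size minus one, so '& ~mask' clears the
 * alignment bits and the cache number is OR-ed into them.  For example,
 * assuming a 4K table at 0xc0001000 and cache number 7, the encoded
 * value would be 0xc0001007.
 */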
static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
						unsigned long mask)
{
	BUG_ON(cachenum > PGF_CACHENUM_MASK);

	return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
}

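/*
 * The 64-bit and 32-bit variants provide the actual page-table
 * allocators; the PTE_NONCACHE_NUM and PTE_TABLE_SIZE constants used by
 * __pte_free_tlb() below are expected to come from these headers.
 */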
#ifdef CONFIG_PPC64
#include <asm/pgalloc-64.h>
#else
#include <asm/pgalloc-32.h>
#endif

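/*
 * Queue an encoded page-table page so it is only freed once the pending
 * TLB flush has completed and no CPU can still be walking it.
 */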
extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);

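/*
 * On SMP the PTE page is handed to pgtable_free_tlb() and freed only
 * after the TLB flush; on UP no other CPU can be walking the table, so
 * it is freed immediately through pte_free().
 */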
#ifdef CONFIG_SMP
#define __pte_free_tlb(tlb, ptepage, address)		\
do { \
	pgtable_page_dtor(ptepage); \
	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
					PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
} while (0)
#else
#define __pte_free_tlb(tlb, pte, address)	pte_free((tlb)->mm, (pte))
#endif


#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGALLOC_H */