/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2001, 2003 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Hook a kernel PTE table into a pmd entry. */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
	pte_t *pte)
{
	set_pmd(pmd, __pmd((unsigned long)pte));
}

/* Hook a user PTE page (a struct page) into a pmd entry. */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
	pgtable_t pte)
{
	set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
#define pmd_pgtable(pmd)	pmd_page(pmd)

/*
 * Initialize a new pmd table with invalid pointers.
 */
extern void pmd_init(unsigned long page, unsigned long pagetable);

#ifndef __PAGETABLE_PMD_FOLDED

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud((unsigned long)pmd));
}
#endif

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern pgd_t *pgd_alloc(struct mm_struct *mm);

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ORDER);
}

/* Allocate a zeroed PTE table for kernel mappings. */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
	unsigned long address)
{
	return (pte_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, PTE_ORDER);
}

/* Allocate, clear and construct a PTE page for user mappings. */
static inline struct page *pte_alloc_one(struct mm_struct *mm,
	unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
	if (!pte)
		return NULL;
	clear_highpage(pte);
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_pages((unsigned long)pte, PTE_ORDER);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_pages(pte, PTE_ORDER);
}

#define __pte_free_tlb(tlb, pte, address)		\
do {							\
	pgtable_page_dtor(pte);				\
	tlb_remove_page((tlb), pte);			\
} while (0)

#ifndef __PAGETABLE_PMD_FOLDED

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd;

	pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
	if (pmd)
		pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
	return pmd;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	free_pages((unsigned long)pmd, PMD_ORDER);
}

#define __pmd_free_tlb(tlb, x, addr)	pmd_free((tlb)->mm, x)

#endif

#ifndef __PAGETABLE_PUD_FOLDED

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pud_t *pud;

	pud = (pud_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PUD_ORDER);
	if (pud)
		pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
	return pud;
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	free_pages((unsigned long)pud, PUD_ORDER);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	set_pgd(pgd, __pgd((unsigned long)pud));
}

#define __pud_free_tlb(tlb, x, addr)	pud_free((tlb)->mm, x)

#endif /* __PAGETABLE_PUD_FOLDED */
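
/*
 * Illustrative sketch (added for documentation, not part of the original
 * header): roughly how generic mm code is expected to pair pte_alloc_one(),
 * pmd_populate() and pte_free() when a user PTE page is needed, in the
 * spirit of __pte_alloc() in mm/memory.c.  Exact locking and accounting
 * differ between kernel versions, so the example is kept under "#if 0";
 * the function name example_pte_alloc() is hypothetical.
 */
#if 0
static int example_pte_alloc(struct mm_struct *mm, pmd_t *pmd,
	unsigned long address)
{
	pgtable_t new = pte_alloc_one(mm, address);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (pmd_none(*pmd)) {
		/* We got here first: hook the new PTE page into the pmd. */
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&mm->page_table_lock);

	if (new)
		/* Another thread populated the pmd first; drop our page. */
		pte_free(mm, new);
	return 0;
}
#endif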

#define check_pgt_cache()	do { } while (0)

extern void pagetable_init(void);

#endif /* _ASM_PGALLOC_H */