/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>

static inline pmd_t __pte_to_pmd(pte_t pte)
{
	pmd_t pmd;

	/*
	 * Convert encoding              pte bits        pmd bits
	 *                              .IR...wrdytp    dy..R...I...wr
	 * empty                        .10...000000 -> 00..0...1...00
	 * prot-none, clean, old        .11...000001 -> 00..1...1...00
	 * prot-none, clean, young      .11...000101 -> 01..1...1...00
	 * prot-none, dirty, old        .10...001001 -> 10..1...1...00
	 * prot-none, dirty, young      .10...001101 -> 11..1...1...00
	 * read-only, clean, old        .11...010001 -> 00..1...1...01
	 * read-only, clean, young      .01...010101 -> 01..1...0...01
	 * read-only, dirty, old        .11...011001 -> 10..1...1...01
	 * read-only, dirty, young      .01...011101 -> 11..1...0...01
	 * read-write, clean, old       .11...110001 -> 00..0...1...11
	 * read-write, clean, young     .01...110101 -> 01..0...0...11
	 * read-write, dirty, old       .10...111001 -> 10..0...1...11
	 * read-write, dirty, young     .00...111101 -> 11..0...0...11
	 */
	if (pte_present(pte)) {
		pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_READ) >> 4;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_WRITE) >> 4;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_INVALID) >> 5;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_PROTECT);
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10;
	} else
		pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pte_t __pmd_to_pte(pmd_t pmd)
{
	pte_t pte;

	/*
	 * Convert encoding              pmd bits          pte bits
	 *                              dy..R...I...wr    .IR...wrdytp
	 * empty                        00..0...1...00 -> .10...000000
	 * prot-none, clean, old        00..0...1...00 -> .10...000001
	 * prot-none, clean, young      01..0...1...00 -> .10...000101
	 * prot-none, dirty, old        10..0...1...00 -> .10...001001
	 * prot-none, dirty, young      11..0...1...00 -> .10...001101
	 * read-only, clean, old        00..1...1...01 -> .11...010001
	 * read-only, clean, young      01..1...1...01 -> .11...010101
	 * read-only, dirty, old        10..1...1...01 -> .11...011001
	 * read-only, dirty, young      11..1...1...01 -> .11...011101
	 * read-write, clean, old       00..0...1...11 -> .10...110001
	 * read-write, clean, young     01..0...1...11 -> .10...110101
	 * read-write, dirty, old       10..0...1...11 -> .10...111001
	 * read-write, dirty, young     11..0...1...11 -> .10...111101
	 */
	if (pmd_present(pmd)) {
		pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE;
		pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_READ) << 4;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT);
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10;
	} else
		pte_val(pte) = _PAGE_INVALID;
	return pte;
}
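/*
 * Worked example of the conversion pair above, assuming the bit
 * definitions of this kernel generation's asm/pgtable.h (_PAGE_READ
 * 0x010, _PAGE_WRITE 0x020, _PAGE_YOUNG 0x004, _PAGE_DIRTY 0x008,
 * _PAGE_PRESENT 0x001, _PAGE_INVALID 0x400, _PAGE_PROTECT 0x200,
 * vs. _SEGMENT_ENTRY_READ 0x001, _SEGMENT_ENTRY_WRITE 0x002,
 * _SEGMENT_ENTRY_YOUNG 0x1000, _SEGMENT_ENTRY_DIRTY 0x2000,
 * _SEGMENT_ENTRY_INVALID 0x020):
 *
 * A read-write, dirty, young pte carries the software bits
 * 0x20 | 0x10 | 0x08 | 0x04 | 0x01 = 0x3d. __pte_to_pmd() shifts
 * read/write right by 4 (0x030 -> 0x003) and dirty/young left by 10
 * (0x00c -> 0x3000), yielding segment entry bits 0x3003, i.e. the
 * "11..0...0...11" row of the table. __pmd_to_pte() applies the
 * inverse shifts, so a round trip through both helpers preserves the
 * permission, dirty and young state.
 */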
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	pmd_t pmd;

	pmd = __pte_to_pmd(pte);
	if (!MACHINE_HAS_HPAGE) {
		/* Emulated huge ptes lose the dirty and young bits */
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
		pmd_val(pmd) |= pte_page(pte)[1].index;
	} else
		pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	*(pmd_t *) ptep = pmd;
}

pte_t huge_ptep_get(pte_t *ptep)
{
	unsigned long origin;
	pmd_t pmd;

	pmd = *(pmd_t *) ptep;
	if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
		origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
		pmd_val(pmd) |= *(unsigned long *) origin;
		/* Emulated huge ptes are young and dirty by definition */
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG | _SEGMENT_ENTRY_DIRTY;
	}
	return __pmd_to_pte(pmd);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	pmd_t *pmdp = (pmd_t *) ptep;
	pte_t pte = huge_ptep_get(ptep);

	pmdp_flush_direct(mm, addr, pmdp);
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
	return pte;
}

int arch_prepare_hugepage(struct page *page)
{
	unsigned long addr = page_to_phys(page);
	pte_t pte;
	pte_t *ptep;
	int i;

	if (MACHINE_HAS_HPAGE)
		return 0;

	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
	if (!ptep)
		return -ENOMEM;

	pte_val(pte) = addr;
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
		pte_val(pte) += PAGE_SIZE;
	}
	page[1].index = (unsigned long) ptep;
	return 0;
}

void arch_release_hugepage(struct page *page)
{
	pte_t *ptep;

	if (MACHINE_HAS_HPAGE)
		return;

	ptep = (pte_t *) page[1].index;
	if (!ptep)
		return;
	clear_table((unsigned long *) ptep, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	page_table_free(&init_mm, (unsigned long *) ptep);
	page[1].index = 0;
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	pudp = pud_alloc(mm, pgdp, addr);
	if (pudp)
		pmdp = pmd_alloc(mm, pudp, addr);
	return (pte_t *) pmdp;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		pudp = pud_offset(pgdp, addr);
		if (pud_present(*pudp))
			pmdp = pmd_offset(pudp, addr);
	}
	return (pte_t *) pmdp;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

int pmd_huge(pmd_t pmd)
{
	if (!MACHINE_HAS_HPAGE)
		return 0;

	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
}

int pud_huge(pud_t pud)
{
	return 0;
}
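/*
 * Summary of the !MACHINE_HAS_HPAGE emulation path, assuming the usual
 * s390 geometry of 256 pte entries (PTRS_PER_PTE) per table: on
 * machines without the hardware large-page facility,
 * arch_prepare_hugepage() backs each huge page with an ordinary pte
 * table that maps the page's 4K frames 1:1 and parks the table pointer
 * in page[1].index of the compound page. set_huge_pte_at() then
 * substitutes that table's address for the segment origin, so address
 * translation walks the emulated pte table, and huge_ptep_get()
 * reverses the substitution by loading the first pte through the
 * stored origin. Dirty and young state cannot be tracked in this mode,
 * which is why huge_ptep_get() reports every present entry as young
 * and dirty.
 */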