// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/facility.h>
#include <asm/page-states.h>

/* 0 = disabled, 1 = enabled, 2 = enabled and no-dat page states usable */
static int cmma_flag = 1;

static int __init cmma(char *str)
{
	char *parm;

	parm = strstrip(str);
	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
		cmma_flag = 1;
		return 1;
	}
	cmma_flag = 0;
	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
		return 1;
	return 0;
}
__setup("cmma=", cmma);

static inline int cmma_test_essa(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1");

	/*
	 * Test ESSA_GET_STATE: rc is preset to -EOPNOTSUPP and is only
	 * cleared if the ESSA instruction is available and executes.
	 */
	asm volatile(
		"	.insn	rrf,0xb9ab0000,%1,%1,%2,0\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=&d" (rc), "+&d" (tmp)
		: "i" (ESSA_GET_STATE), "0" (-EOPNOTSUPP));
	return rc;
}

void __init cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

static inline unsigned char get_page_state(struct page *page)
{
	unsigned char state;

	asm volatile("	.insn	rrf,0xb9ab0000,%0,%1,%2,0"
		     : "=&d" (state)
		     : "a" (page_to_phys(page)),
		       "i" (ESSA_GET_STATE));
	return state & 0x3f;
}

static inline void set_page_unused(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

static inline void set_page_stable_dat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

static inline void set_page_stable_nodat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE_NODAT));
}
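/*
 * The mark_kernel_* walkers below flag every page that backs a kernel
 * page table with PG_arch_1, so that cmma_init_nodat() can recognize
 * page-table pages and leave them out when marking memory stable/no-dat.
 * Region and segment tables occupy 16KB, i.e. four consecutive pages,
 * while a pte table fits into a single page.
 */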
static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;
		page = virt_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
}

static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || pud_large(*pud))
			continue;
		if (!pud_folded(*pud)) {
			page = virt_to_page(pud_val(*pud));
			/* a segment table spans four pages */
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d))
			continue;
		if (!p4d_folded(*p4d)) {
			page = virt_to_page(p4d_val(*p4d));
			/* a region table spans four pages */
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

static void mark_kernel_pgd(void)
{
	unsigned long addr, next;
	struct page *page;
	pgd_t *pgd;
	int i;

	addr = 0;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, MODULES_END);
		if (pgd_none(*pgd))
			continue;
		if (!pgd_folded(*pgd)) {
			page = virt_to_page(pgd_val(*pgd));
			/* a region table spans four pages */
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
	} while (pgd++, addr = next, addr != MODULES_END);
}

void __init cmma_init_nodat(void)
{
	struct memblock_region *reg;
	struct page *page;
	unsigned long start, end, ix;

	if (cmma_flag < 2)
		return;
	/* Mark pages used in kernel page tables */
	mark_kernel_pgd();

	/* Set all kernel pages not used for page tables to stable/no-dat */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);
		end = memblock_region_memory_end_pfn(reg);
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* skip page table pages */
			if (!list_empty(&page->lru))
				continue;	/* skip free pages */
			set_page_stable_nodat(page, 0);
		}
	}
}

void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unused(page, order);
}

void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	if (cmma_flag < 2)
		set_page_stable_dat(page, order);
	else
		set_page_stable_nodat(page, order);
}

void arch_set_page_dat(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable_dat(page, order);
}

void arch_set_page_nodat(struct page *page, int order)
{
	if (cmma_flag < 2)
		return;
	set_page_stable_nodat(page, order);
}

int arch_test_page_nodat(struct page *page)
{
	unsigned char state;

	if (cmma_flag < 2)
		return 0;
	state = get_page_state(page);
	return !!(state & 0x20);
}

void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	if (make_stable)
		/* make the per-cpu free pages visible on the zone lists */
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable_dat(page, order);
				else
					set_page_unused(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}
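/*
 * Usage notes: arch_free_page() and arch_alloc_page() are hooked into
 * the page allocator because s390 defines HAVE_ARCH_FREE_PAGE and
 * HAVE_ARCH_ALLOC_PAGE in <asm/page.h>; arch_set_page_states() is
 * presumably used by the suspend/resume path to make all free pages
 * stable before a hibernation image is written and to mark them unused
 * again afterwards. Guest page hinting itself is controlled from the
 * kernel command line:
 *
 *	cmma=yes / cmma=on	enable guest page hinting (default)
 *	cmma=no  / cmma=off	disable guest page hinting
 */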