// SPDX-License-Identifier: GPL-2.0

/*
 * Transitional page tables for kexec and hibernate
 *
 * This file derived from: arch/arm64/kernel/hibernate.c
 *
 * Copyright (c) 2021, Microsoft Corporation.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 *
 */

/*
 * Transitional page tables are used while the system transfers from one world
 * to another, such as during a hibernate restore or a kexec reboot. During
 * these phases the live page tables cannot be relied upon, because hibernate
 * and kexec may overwrite them in the middle of the transition.
 */

#include <linux/bug.h>
#include <linux/kfence.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/suspend.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/trans_pgd.h>

static void *trans_alloc(struct trans_pgd_info *info)
{
	return info->trans_alloc_page(info->trans_alloc_arg);
}

static int copy_pte(struct trans_pgd_info *info, pmd_t *dst_pmdp,
		    pmd_t *src_pmdp, unsigned long start, unsigned long end)
{
	pte_t *src_ptep;
	pte_t *dst_ptep;
	unsigned long addr = start;

	dst_ptep = trans_alloc(info);
	if (!dst_ptep)
		return -ENOMEM;
	pmd_populate_kernel(NULL, dst_pmdp, dst_ptep);
	dst_ptep = pte_offset_kernel(dst_pmdp, start);

	src_ptep = pte_offset_kernel(src_pmdp, start);
	do {
		pte_t pte = __ptep_get(src_ptep);

		if (pte_none(pte))
			continue;
		/*
		 * Make the copy writable: restore/relocation will overwrite
		 * areas that may be mapped read-only (code, rodata). Mark it
		 * valid too, since the source entry may have been invalidated
		 * (e.g. by debug_pagealloc) while its contents still need to
		 * be restored.
		 */
		__set_pte(dst_ptep, pte_mkvalid_k(pte_mkwrite_novma(pte)));
	} while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);

	return 0;
}

static int copy_pmd(struct trans_pgd_info *info, pud_t *dst_pudp,
		    pud_t *src_pudp, unsigned long start, unsigned long end)
{
	pmd_t *src_pmdp;
	pmd_t *dst_pmdp;
	unsigned long next;
	unsigned long addr = start;

	if (pud_none(READ_ONCE(*dst_pudp))) {
		dst_pmdp = trans_alloc(info);
		if (!dst_pmdp)
			return -ENOMEM;
		pud_populate(NULL, dst_pudp, dst_pmdp);
	}
	dst_pmdp = pmd_offset(dst_pudp, start);

	src_pmdp = pmd_offset(src_pudp, start);
	do {
		pmd_t pmd = READ_ONCE(*src_pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			continue;
		if (pmd_table(pmd)) {
			if (copy_pte(info, dst_pmdp, src_pmdp, addr, next))
				return -ENOMEM;
		} else {
			set_pmd(dst_pmdp,
				pmd_mkvalid_k(pmd_mkwrite_novma(pmd)));
		}
	} while (dst_pmdp++, src_pmdp++, addr = next, addr != end);

	return 0;
}

static int copy_pud(struct trans_pgd_info *info, p4d_t *dst_p4dp,
		    p4d_t *src_p4dp, unsigned long start,
		    unsigned long end)
{
	pud_t *dst_pudp;
	pud_t *src_pudp;
	unsigned long next;
	unsigned long addr = start;

	if (p4d_none(READ_ONCE(*dst_p4dp))) {
		dst_pudp = trans_alloc(info);
		if (!dst_pudp)
			return -ENOMEM;
		p4d_populate(NULL, dst_p4dp, dst_pudp);
	}
	dst_pudp = pud_offset(dst_p4dp, start);

	src_pudp = pud_offset(src_p4dp, start);
	do {
		pud_t pud = READ_ONCE(*src_pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			continue;
		if (pud_table(pud)) {
			if (copy_pmd(info, dst_pudp, src_pudp, addr, next))
				return -ENOMEM;
		} else {
			set_pud(dst_pudp,
				pud_mkvalid_k(pud_mkwrite_novma(pud)));
		}
	} while (dst_pudp++, src_pudp++, addr = next, addr != end);

	return 0;
}

static int copy_p4d(struct trans_pgd_info *info, pgd_t *dst_pgdp,
		    pgd_t *src_pgdp, unsigned long start,
		    unsigned long end)
{
	p4d_t *dst_p4dp;
	p4d_t *src_p4dp;
	unsigned long next;
	unsigned long addr = start;

	if (pgd_none(READ_ONCE(*dst_pgdp))) {
		dst_p4dp = trans_alloc(info);
		if (!dst_p4dp)
			return -ENOMEM;
		pgd_populate(NULL, dst_pgdp, dst_p4dp);
	}

	dst_p4dp = p4d_offset(dst_pgdp, start);
	src_p4dp = p4d_offset(src_pgdp, start);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(READ_ONCE(*src_p4dp)))
			continue;
		if (copy_pud(info, dst_p4dp, src_p4dp, addr, next))
			return -ENOMEM;
	} while (dst_p4dp++, src_p4dp++, addr = next, addr != end);

	return 0;
}

static int copy_page_tables(struct trans_pgd_info *info, pgd_t *dst_pgdp,
			    unsigned long start, unsigned long end)
{
	unsigned long next;
	unsigned long addr = start;
	pgd_t *src_pgdp = pgd_offset_k(start);

	dst_pgdp = pgd_offset_pgd(dst_pgdp, start);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none(READ_ONCE(*src_pgdp)))
			continue;
		if (copy_p4d(info, dst_pgdp, src_pgdp, addr, next))
			return -ENOMEM;
	} while (dst_pgdp++, src_pgdp++, addr = next, addr != end);

	return 0;
}

/*
 * Create trans_pgd and copy linear map.
 * info:	contains allocator and its argument
 * dst_pgdp:	new page table that is created, and to which the map is copied.
 * start:	Start of the interval (inclusive).
 * end:		End of the interval (exclusive).
 *
 * Returns 0 on success, and -ENOMEM on failure.
 */
int trans_pgd_create_copy(struct trans_pgd_info *info, pgd_t **dst_pgdp,
			  unsigned long start, unsigned long end)
{
	int rc;
	pgd_t *trans_pgd = trans_alloc(info);

	if (!trans_pgd) {
		pr_err("Failed to allocate memory for temporary page tables.\n");
		return -ENOMEM;
	}

	rc = copy_page_tables(info, trans_pgd, start, end);
	if (!rc)
		*dst_pgdp = trans_pgd;

	return rc;
}
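
/*
 * Example (illustrative sketch, not part of this file): callers supply the
 * page allocator through trans_pgd_info. The hibernate resume path wires it
 * up roughly as below; names follow arch/arm64/kernel/hibernate.c and may
 * differ between kernel versions:
 *
 *	static void *hibernate_page_alloc(void *arg)
 *	{
 *		return (void *)get_safe_page((__force gfp_t)(unsigned long)arg);
 *	}
 *
 *	struct trans_pgd_info trans_info = {
 *		.trans_alloc_page	= hibernate_page_alloc,
 *		.trans_alloc_arg	= (void *)GFP_ATOMIC,
 *	};
 *	pgd_t *tmp_pg_dir;
 *	int rc;
 *
 *	rc = trans_pgd_create_copy(&trans_info, &tmp_pg_dir, PAGE_OFFSET,
 *				   PAGE_END);
 */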

/*
 * The page we want to idmap may be outside the range covered by VA_BITS that
 * can be built using the kernel's p?d_populate() helpers. As a one-off, for a
 * single page, we build these page tables bottom up and just assume that will
 * need the maximum T0SZ.
 *
 * Returns 0 on success, and -ENOMEM on failure.
 * On success trans_ttbr0 contains a page table with the page idmapped, and
 * t0sz is set to the maximum T0SZ for this page.
 */
int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
			 unsigned long *t0sz, void *page)
{
	phys_addr_t dst_addr = virt_to_phys(page);
	unsigned long pfn = __phys_to_pfn(dst_addr);
	int max_msb = (dst_addr & GENMASK(52, 48)) ? 51 : 47;
	int bits_mapped = PAGE_SHIFT - 4;
	unsigned long level_mask, prev_level_entry, *levels[4];
	int this_level, index, level_lsb, level_msb;

	dst_addr &= PAGE_MASK;
	prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_ROX));

	for (this_level = 3; this_level >= 0; this_level--) {
		levels[this_level] = trans_alloc(info);
		if (!levels[this_level])
			return -ENOMEM;

		level_lsb = ARM64_HW_PGTABLE_LEVEL_SHIFT(this_level);
		level_msb = min(level_lsb + bits_mapped, max_msb);
		level_mask = GENMASK_ULL(level_msb, level_lsb);

		index = (dst_addr & level_mask) >> level_lsb;
		*(levels[this_level] + index) = prev_level_entry;

		pfn = virt_to_pfn(levels[this_level]);
		prev_level_entry = pte_val(pfn_pte(pfn,
						   __pgprot(PMD_TYPE_TABLE)));

		if (level_msb == max_msb)
			break;
	}

	*trans_ttbr0 = phys_to_ttbr(__pfn_to_phys(pfn));
	*t0sz = TCR_T0SZ(max_msb + 1);

	return 0;
}
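
/*
 * Worked example (illustrative, 4K pages, physical address below 2^48):
 * PAGE_SHIFT is 12, so bits_mapped is 8 and each GENMASK(level_msb,
 * level_lsb) spans 9 address bits, i.e. one table index per level:
 *
 *	level 3: bits [20:12]		level 2: bits [29:21]
 *	level 1: bits [38:30]		level 0: bits [47:39]
 *
 * At level 0, level_msb reaches max_msb (47), so the loop stops there and
 * t0sz is set to TCR_T0SZ(48), i.e. a 48-bit TTBR0 address space.
 */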

/*
 * Create a copy of the vector table so we can call HVC_SET_VECTORS or
 * HVC_SOFT_RESTART from contexts where the table may be overwritten.
 */
int trans_pgd_copy_el2_vectors(struct trans_pgd_info *info,
			       phys_addr_t *el2_vectors)
{
	void *hyp_stub = trans_alloc(info);

	if (!hyp_stub)
		return -ENOMEM;
	*el2_vectors = virt_to_phys(hyp_stub);
	memcpy(hyp_stub, &trans_pgd_stub_vectors, ARM64_VECTOR_TABLE_LEN);
	caches_clean_inval_pou((unsigned long)hyp_stub,
			       (unsigned long)hyp_stub +
			       ARM64_VECTOR_TABLE_LEN);
	dcache_clean_inval_poc((unsigned long)hyp_stub,
			       (unsigned long)hyp_stub +
			       ARM64_VECTOR_TABLE_LEN);

	return 0;
}
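
/*
 * Example (illustrative sketch): callers that may need to reset EL2, such as
 * the kexec and hibernate paths, copy the stub vectors while the old tables
 * are still intact, then point EL2 at the copy. Roughly, following the
 * hibernate caller (names may vary across kernel versions):
 *
 *	phys_addr_t el2_vectors;
 *
 *	if (el2_reset_needed()) {
 *		rc = trans_pgd_copy_el2_vectors(&trans_info, &el2_vectors);
 *		if (rc)
 *			return rc;
 *		__hyp_set_vectors(el2_vectors);
 *	}
 */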