// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/spectre.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>

struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;

struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;

static u64 __io_map_base;

struct hyp_fixmap_slot {
        u64 addr;
        kvm_pte_t *ptep;
};
static DEFINE_PER_CPU(struct hyp_fixmap_slot, fixmap_slots);

static int __pkvm_create_mappings(unsigned long start, unsigned long size,
                                  unsigned long phys, enum kvm_pgtable_prot prot)
{
        int err;

        hyp_spin_lock(&pkvm_pgd_lock);
        err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
        hyp_spin_unlock(&pkvm_pgd_lock);

        return err;
}

/**
 * pkvm_alloc_private_va_range - Allocates a private VA range.
 * @size: The size of the VA range to reserve.
 * @haddr: The hypervisor virtual start address of the allocation.
 *
 * The private virtual address (VA) range is allocated above __io_map_base
 * and aligned based on the order of @size.
 *
 * Return: 0 on success or negative error code on failure.
 */
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
{
        unsigned long base, addr;
        int ret = 0;

        hyp_spin_lock(&pkvm_pgd_lock);

        /* Align the allocation based on the order of its size */
        addr = ALIGN(__io_map_base, PAGE_SIZE << get_order(size));

        /* The allocated size is always a multiple of PAGE_SIZE */
        base = addr + PAGE_ALIGN(size);

        /*
         * Are we overflowing on the vmemmap?
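         *
         * A wrapped-around ALIGN() shows up as addr == 0, and the freshly
         * reserved range must stay below the hyp vmemmap, which starts at
         * __hyp_vmemmap.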
         */
        if (!addr || base > __hyp_vmemmap) {
                ret = -ENOMEM;
        } else {
                __io_map_base = base;
                *haddr = addr;
        }

        hyp_spin_unlock(&pkvm_pgd_lock);

        return ret;
}

int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
                                  enum kvm_pgtable_prot prot,
                                  unsigned long *haddr)
{
        unsigned long addr;
        int err;

        size = PAGE_ALIGN(size + offset_in_page(phys));
        err = pkvm_alloc_private_va_range(size, &addr);
        if (err)
                return err;

        err = __pkvm_create_mappings(addr, size, phys, prot);
        if (err)
                return err;

        *haddr = addr + offset_in_page(phys);
        return err;
}

int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
        unsigned long start = (unsigned long)from;
        unsigned long end = (unsigned long)to;
        unsigned long virt_addr;
        phys_addr_t phys;

        hyp_assert_lock_held(&pkvm_pgd_lock);

        start = start & PAGE_MASK;
        end = PAGE_ALIGN(end);

        for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
                int err;

                phys = hyp_virt_to_phys((void *)virt_addr);
                err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
                                          phys, prot);
                if (err)
                        return err;
        }

        return 0;
}

int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
        int ret;

        hyp_spin_lock(&pkvm_pgd_lock);
        ret = pkvm_create_mappings_locked(from, to, prot);
        hyp_spin_unlock(&pkvm_pgd_lock);

        return ret;
}

int hyp_back_vmemmap(phys_addr_t back)
{
        unsigned long i, start, size, end = 0;
        int ret;

        for (i = 0; i < hyp_memblock_nr; i++) {
                start = hyp_memory[i].base;
                start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
                /*
                 * The beginning of the hyp_vmemmap region for the current
                 * memblock may already be backed by the page backing the end
                 * of the previous region, so avoid mapping it twice.
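                 *
                 * At this point "end" still holds the page-aligned end of the
                 * previous memblock's vmemmap range (or 0 on the first
                 * iteration), so clamping "start" to it skips any page that
                 * has already been mapped.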
                 */
                start = max(start, end);

                end = hyp_memory[i].base + hyp_memory[i].size;
                end = PAGE_ALIGN((u64)hyp_phys_to_page(end));
                if (start >= end)
                        continue;

                size = end - start;
                ret = __pkvm_create_mappings(start, size, back, PAGE_HYP);
                if (ret)
                        return ret;

                memset(hyp_phys_to_virt(back), 0, size);
                back += size;
        }

        return 0;
}

static void *__hyp_bp_vect_base;
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
{
        void *vector;

        switch (slot) {
        case HYP_VECTOR_DIRECT: {
                vector = __kvm_hyp_vector;
                break;
        }
        case HYP_VECTOR_SPECTRE_DIRECT: {
                vector = __bp_harden_hyp_vecs;
                break;
        }
        case HYP_VECTOR_INDIRECT:
        case HYP_VECTOR_SPECTRE_INDIRECT: {
                vector = (void *)__hyp_bp_vect_base;
                break;
        }
        default:
                return -EINVAL;
        }

        vector = __kvm_vector_slot2addr(vector, slot);
        *this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;

        return 0;
}

int hyp_map_vectors(void)
{
        phys_addr_t phys;
        unsigned long bp_base;
        int ret;

        if (!kvm_system_needs_idmapped_vectors()) {
                __hyp_bp_vect_base = __bp_harden_hyp_vecs;
                return 0;
        }

        phys = __hyp_pa(__bp_harden_hyp_vecs);
        ret = __pkvm_create_private_mapping(phys, __BP_HARDEN_HYP_VECS_SZ,
                                            PAGE_HYP_EXEC, &bp_base);
        if (ret)
                return ret;

        __hyp_bp_vect_base = (void *)bp_base;

        return 0;
}

void *hyp_fixmap_map(phys_addr_t phys)
{
        struct hyp_fixmap_slot *slot = this_cpu_ptr(&fixmap_slots);
        kvm_pte_t pte, *ptep = slot->ptep;

        pte = *ptep;
        pte &= ~kvm_phys_to_pte(KVM_PHYS_INVALID);
        pte |= kvm_phys_to_pte(phys) | KVM_PTE_VALID;
        WRITE_ONCE(*ptep, pte);
        dsb(ishst);

        return (void *)slot->addr;
}

static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
{
        kvm_pte_t *ptep = slot->ptep;
        u64 addr = slot->addr;

        WRITE_ONCE(*ptep, *ptep & ~KVM_PTE_VALID);

        /*
         * Irritatingly, the architecture requires that we use inner-shareable
         * broadcast TLB invalidation here in case another CPU speculates
         * through our fixmap and decides to create an "amalgamation of the
         * values held in the TLB" due to the apparent lack of a
         * break-before-make sequence.
         *
         * https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
         */
        dsb(ishst);
        __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), (KVM_PGTABLE_MAX_LEVELS - 1));
        dsb(ish);
        isb();
}

void hyp_fixmap_unmap(void)
{
        fixmap_clear_slot(this_cpu_ptr(&fixmap_slots));
}

static int __create_fixmap_slot_cb(const struct kvm_pgtable_visit_ctx *ctx,
                                   enum kvm_pgtable_walk_flags visit)
{
        struct hyp_fixmap_slot *slot = per_cpu_ptr(&fixmap_slots, (u64)ctx->arg);

        if (!kvm_pte_valid(ctx->old) || ctx->level != KVM_PGTABLE_MAX_LEVELS - 1)
                return -EINVAL;

        slot->addr = ctx->addr;
        slot->ptep = ctx->ptep;

        /*
         * Clear the PTE, but keep the page-table page refcount elevated to
         * prevent it from ever being freed. This lets us manipulate the PTEs
         * by hand safely without ever needing to allocate memory.
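         *
         * fixmap_clear_slot() only knocks out the valid bit; the PTE never
         * goes through kvm_pgtable_hyp_unmap(), so the reference taken on the
         * page-table page when the dummy mapping was installed in
         * hyp_create_pcpu_fixmap() is never dropped.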
         */
        fixmap_clear_slot(slot);

        return 0;
}

static int create_fixmap_slot(u64 addr, u64 cpu)
{
        struct kvm_pgtable_walker walker = {
                .cb = __create_fixmap_slot_cb,
                .flags = KVM_PGTABLE_WALK_LEAF,
                .arg = (void *)cpu,
        };

        return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
}

int hyp_create_pcpu_fixmap(void)
{
        unsigned long addr, i;
        int ret;

        for (i = 0; i < hyp_nr_cpus; i++) {
                ret = pkvm_alloc_private_va_range(PAGE_SIZE, &addr);
                if (ret)
                        return ret;

                ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PAGE_SIZE,
                                          __hyp_pa(__hyp_bss_start), PAGE_HYP);
                if (ret)
                        return ret;

                ret = create_fixmap_slot(addr, i);
                if (ret)
                        return ret;
        }

        return 0;
}

int hyp_create_idmap(u32 hyp_va_bits)
{
        unsigned long start, end;

        start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
        start = ALIGN_DOWN(start, PAGE_SIZE);

        end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
        end = ALIGN(end, PAGE_SIZE);

        /*
         * One half of the VA space is reserved to linearly map portions of
         * memory -- see va_layout.c for more details. The other half of the VA
         * space contains the trampoline page, and needs some care. Split that
         * second half in two and find the quarter of VA space not conflicting
         * with the idmap to place the IOs and the vmemmap. IOs use the lower
         * half of the quarter and the vmemmap the upper half.
         */
        __io_map_base = start & BIT(hyp_va_bits - 2);
        __io_map_base ^= BIT(hyp_va_bits - 2);
        __hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);

        return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}

static void *admit_host_page(void *arg)
{
        struct kvm_hyp_memcache *host_mc = arg;

        if (!host_mc->nr_pages)
                return NULL;

        /*
         * The host still owns the pages in its memcache, so we need to go
         * through a full host-to-hyp donation cycle to change it. Fortunately,
         * __pkvm_host_donate_hyp() takes care of races for us, so if it
         * succeeds we're good to go.
         */
        if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(host_mc->head), 1))
                return NULL;

        return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
}

/* Refill our local memcache by popping pages from the one provided by the host. */
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
                    struct kvm_hyp_memcache *host_mc)
{
        struct kvm_hyp_memcache tmp = *host_mc;
        int ret;

        ret = __topup_hyp_memcache(mc, min_pages, admit_host_page,
                                   hyp_virt_to_phys, &tmp);
        *host_mc = tmp;

        return ret;
}