// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables kernel and guest-mode vCPU access to guest physical
 * memory with suitable invalidation mechanisms.
 *
 * Copyright © 2021 Amazon.com, Inc. or its affiliates.
 *
 * Authors:
 *   David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>

#include "kvm_mm.h"

/*
 * MMU notifier 'invalidate_range_start' hook.
 */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
				       unsigned long end, bool may_block)
{
	struct gfn_to_pfn_cache *gpc;

	spin_lock(&kvm->gpc_lock);
	list_for_each_entry(gpc, &kvm->gpc_list, list) {
		read_lock_irq(&gpc->lock);

		/* Only a single page so no need to care about length */
		if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
		    gpc->uhva >= start && gpc->uhva < end) {
			read_unlock_irq(&gpc->lock);

			/*
			 * There is a small window here where the cache could
			 * be modified, and invalidation would no longer be
			 * necessary. Hence check again whether invalidation
			 * is still necessary once the write lock has been
			 * acquired.
			 */

			write_lock_irq(&gpc->lock);
			if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
			    gpc->uhva >= start && gpc->uhva < end)
				gpc->valid = false;
			write_unlock_irq(&gpc->lock);
			continue;
		}

		read_unlock_irq(&gpc->lock);
	}
	spin_unlock(&kvm->gpc_lock);
}

bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);

	if (!gpc->active)
		return false;

	/*
	 * If the page was cached from a memslot, make sure the memslots have
	 * not been re-configured.
	 */
	if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation)
		return false;

	if (kvm_is_error_hva(gpc->uhva))
		return false;

	if (offset_in_page(gpc->uhva) + len > PAGE_SIZE)
		return false;

	if (!gpc->valid)
		return false;

	return true;
}

static void *gpc_map(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return kmap(pfn_to_page(pfn));

#ifdef CONFIG_HAS_IOMEM
	return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
#else
	return NULL;
#endif
}

static void gpc_unmap(kvm_pfn_t pfn, void *khva)
{
	/* Unmap the old pfn/page if it was mapped before. */
	if (is_error_noslot_pfn(pfn) || !khva)
		return;

	if (pfn_valid(pfn)) {
		kunmap(pfn_to_page(pfn));
		return;
	}

#ifdef CONFIG_HAS_IOMEM
	memunmap(khva);
#endif
}

static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
{
	/*
	 * mn_active_invalidate_count acts for all intents and purposes
	 * like mmu_invalidate_in_progress here; but the latter cannot
	 * be used here because the invalidation of caches in the
	 * mmu_notifier event occurs _before_ mmu_invalidate_in_progress
	 * is elevated.
	 *
	 * Note, it does not matter that mn_active_invalidate_count
	 * is not protected by gpc->lock. It is guaranteed to
	 * be elevated before the mmu_notifier acquires gpc->lock, and
	 * isn't dropped until after mmu_invalidate_seq is updated.
	 */
	if (kvm->mn_active_invalidate_count)
		return true;

	/*
	 * Ensure mn_active_invalidate_count is read before
	 * mmu_invalidate_seq. This pairs with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end() to guarantee either the
	 * old (non-zero) value of mn_active_invalidate_count or the
	 * new (incremented) value of mmu_invalidate_seq is observed.
	 */
	smp_rmb();
	return kvm->mmu_invalidate_seq != mmu_seq;
}
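
/*
 * Resolve gpc->uhva to a pfn and (re)establish a kernel mapping for it.
 * Called by __kvm_gpc_refresh() with gpc->refresh_lock held and gpc->lock
 * held for write. The lock is dropped around the (possibly sleeping) lookup
 * and mapping, and the whole sequence is retried if an mmu_notifier
 * invalidation races with it. Returns 0 on success or -EFAULT on failure,
 * in both cases with gpc->lock re-acquired for write.
 */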
static int hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
{
	/* Note, the new page offset may be different than the old! */
	void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
	kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
	void *new_khva = NULL;
	unsigned long mmu_seq;

	lockdep_assert_held(&gpc->refresh_lock);

	lockdep_assert_held_write(&gpc->lock);

	/*
	 * Invalidate the cache prior to dropping gpc->lock, the gpa=>uhva
	 * assets have already been updated and so a concurrent check() from a
	 * different task may not fail the gpa/uhva/generation checks.
	 */
	gpc->valid = false;

	do {
		mmu_seq = gpc->kvm->mmu_invalidate_seq;
		smp_rmb();

		write_unlock_irq(&gpc->lock);

		/*
		 * If the previous iteration "failed" due to an mmu_notifier
		 * event, release the pfn and unmap the kernel virtual address
		 * from the previous attempt. Unmapping might sleep, so this
		 * needs to be done after dropping the lock. Opportunistically
		 * check for resched while the lock isn't held.
		 */
		if (new_pfn != KVM_PFN_ERR_FAULT) {
			/*
			 * Keep the mapping if the previous iteration reused
			 * the existing mapping and didn't create a new one.
			 */
			if (new_khva != old_khva)
				gpc_unmap(new_pfn, new_khva);

			kvm_release_pfn_clean(new_pfn);

			cond_resched();
		}

		/* We always request a writeable mapping */
		new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL);
		if (is_error_noslot_pfn(new_pfn))
			goto out_error;

		/*
		 * Obtain a new kernel mapping if KVM itself will access the
		 * pfn. Note, kmap() and memremap() can both sleep, so this
		 * too must be done outside of gpc->lock!
		 */
		if (new_pfn == gpc->pfn)
			new_khva = old_khva;
		else
			new_khva = gpc_map(new_pfn);

		if (!new_khva) {
			kvm_release_pfn_clean(new_pfn);
			goto out_error;
		}

		write_lock_irq(&gpc->lock);

		/*
		 * Other tasks must wait for _this_ refresh to complete before
		 * attempting to refresh.
		 */
		WARN_ON_ONCE(gpc->valid);
	} while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));

	gpc->valid = true;
	gpc->pfn = new_pfn;
	gpc->khva = new_khva + offset_in_page(gpc->uhva);

	/*
	 * Put the reference to the _new_ pfn. The pfn is now tracked by the
	 * cache and can be safely migrated, swapped, etc... as the cache will
	 * invalidate any mappings in response to relevant mmu_notifier events.
	 */
	kvm_release_pfn_clean(new_pfn);

	return 0;

out_error:
	write_lock_irq(&gpc->lock);

	return -EFAULT;
}
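
/*
 * Refresh the cache so that it maps @gpa (or, for an HVA-only cache, @uhva)
 * with room for an access of @len bytes within a single page. For GPA-based
 * caches the gpa=>uhva translation is redone against the current memslots;
 * the pfn is then re-resolved via hva_to_pfn_retry() if the uhva changed or
 * the cache had been invalidated. Any previously mapped pfn that is no
 * longer used is unmapped after gpc->lock is dropped.
 */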
static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
			     unsigned long len)
{
	unsigned long page_offset;
	bool unmap_old = false;
	unsigned long old_uhva;
	kvm_pfn_t old_pfn;
	bool hva_change = false;
	void *old_khva;
	int ret;

	/* Either gpa or uhva must be valid, but not both */
	if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
		return -EINVAL;

	/*
	 * The cached access must fit within a single page. The 'len' argument
	 * exists only to enforce that.
	 */
	page_offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
					      offset_in_page(gpa);
	if (page_offset + len > PAGE_SIZE)
		return -EINVAL;

	lockdep_assert_held(&gpc->refresh_lock);

	write_lock_irq(&gpc->lock);

	if (!gpc->active) {
		ret = -EINVAL;
		goto out_unlock;
	}

	old_pfn = gpc->pfn;
	old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
	old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);

	if (kvm_is_error_gpa(gpa)) {
		gpc->gpa = INVALID_GPA;
		gpc->memslot = NULL;
		gpc->uhva = PAGE_ALIGN_DOWN(uhva);

		if (gpc->uhva != old_uhva)
			hva_change = true;
	} else {
		struct kvm_memslots *slots = kvm_memslots(gpc->kvm);

		if (gpc->gpa != gpa || gpc->generation != slots->generation ||
		    kvm_is_error_hva(gpc->uhva)) {
			gfn_t gfn = gpa_to_gfn(gpa);

			gpc->gpa = gpa;
			gpc->generation = slots->generation;
			gpc->memslot = __gfn_to_memslot(slots, gfn);
			gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);

			if (kvm_is_error_hva(gpc->uhva)) {
				ret = -EFAULT;
				goto out;
			}

			/*
			 * Even if the GPA and/or the memslot generation changed, the
			 * HVA may still be the same.
			 */
			if (gpc->uhva != old_uhva)
				hva_change = true;
		} else {
			gpc->uhva = old_uhva;
		}
	}

	/* Note: the offset must be correct before calling hva_to_pfn_retry() */
	gpc->uhva += page_offset;

	/*
	 * If the userspace HVA changed or the PFN was already invalid,
	 * drop the lock and do the HVA to PFN lookup again.
	 */
	if (!gpc->valid || hva_change) {
		ret = hva_to_pfn_retry(gpc);
	} else {
		/*
		 * If the HVA→PFN mapping was already valid, don't unmap it.
		 * But do update gpc->khva because the offset within the page
		 * may have changed.
		 */
		gpc->khva = old_khva + page_offset;
		ret = 0;
		goto out_unlock;
	}

out:
	/*
	 * Invalidate the cache and purge the pfn/khva if the refresh failed.
	 * Some/all of the uhva, gpa, and memslot generation info may still be
	 * valid, leave it as is.
	 */
	if (ret) {
		gpc->valid = false;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		gpc->khva = NULL;
	}

	/* Detect a pfn change before dropping the lock! */
	unmap_old = (old_pfn != gpc->pfn);

out_unlock:
	write_unlock_irq(&gpc->lock);

	if (unmap_old)
		gpc_unmap(old_pfn, old_khva);

	return ret;
}

int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
	unsigned long uhva;

	guard(mutex)(&gpc->refresh_lock);

	/*
	 * If the GPA is valid then ignore the HVA, as a cache can be GPA-based
	 * or HVA-based, not both. For GPA-based caches, the HVA will be
	 * recomputed during refresh if necessary.
	 */
	uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD;

	return __kvm_gpc_refresh(gpc, gpc->gpa, uhva, len);
}
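
/*
 * Cache lifecycle: kvm_gpc_init() initializes the structure,
 * kvm_gpc_activate() or kvm_gpc_activate_hva() links it into kvm->gpc_list
 * and establishes the initial mapping, and kvm_gpc_deactivate() tears the
 * mapping down and unlinks it again. A minimal usage sketch for a consumer
 * (the 'len' value and the error handling are purely illustrative):
 *
 *	kvm_gpc_init(gpc, kvm);
 *	if (kvm_gpc_activate(gpc, gpa, len))
 *		return -EFAULT;
 *
 *	read_lock_irqsave(&gpc->lock, flags);
 *	while (!kvm_gpc_check(gpc, len)) {
 *		read_unlock_irqrestore(&gpc->lock, flags);
 *		if (kvm_gpc_refresh(gpc, len))
 *			goto out;
 *		read_lock_irqsave(&gpc->lock, flags);
 *	}
 *	... access the guest page through gpc->khva ...
 *	read_unlock_irqrestore(&gpc->lock, flags);
 * out:
 *	kvm_gpc_deactivate(gpc);
 */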
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
{
	rwlock_init(&gpc->lock);
	mutex_init(&gpc->refresh_lock);

	gpc->kvm = kvm;
	gpc->pfn = KVM_PFN_ERR_FAULT;
	gpc->gpa = INVALID_GPA;
	gpc->uhva = KVM_HVA_ERR_BAD;
	gpc->active = gpc->valid = false;
}

static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
			      unsigned long len)
{
	struct kvm *kvm = gpc->kvm;

	guard(mutex)(&gpc->refresh_lock);

	if (!gpc->active) {
		if (KVM_BUG_ON(gpc->valid, kvm))
			return -EIO;

		spin_lock(&kvm->gpc_lock);
		list_add(&gpc->list, &kvm->gpc_list);
		spin_unlock(&kvm->gpc_lock);

		/*
		 * Activate the cache after adding it to the list, a concurrent
		 * refresh must not establish a mapping until the cache is
		 * reachable by mmu_notifier events.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = true;
		write_unlock_irq(&gpc->lock);
	}
	return __kvm_gpc_refresh(gpc, gpa, uhva, len);
}

int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
{
	return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len);
}

int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long uhva, unsigned long len)
{
	return __kvm_gpc_activate(gpc, INVALID_GPA, uhva, len);
}

void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
{
	struct kvm *kvm = gpc->kvm;
	kvm_pfn_t old_pfn;
	void *old_khva;

	guard(mutex)(&gpc->refresh_lock);

	if (gpc->active) {
		/*
		 * Deactivate the cache before removing it from the list, KVM
		 * must stall mmu_notifier events until all users go away, i.e.
		 * until gpc->lock is dropped and refresh is guaranteed to fail.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = false;
		gpc->valid = false;

		/*
		 * Leave the GPA => uHVA cache intact, it's protected by the
		 * memslot generation. The PFN lookup needs to be redone every
		 * time as mmu_notifier protection is lost when the cache is
		 * removed from the VM's gpc_list.
		 */
		old_khva = gpc->khva - offset_in_page(gpc->khva);
		gpc->khva = NULL;

		old_pfn = gpc->pfn;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		write_unlock_irq(&gpc->lock);

		spin_lock(&kvm->gpc_lock);
		list_del(&gpc->list);
		spin_unlock(&kvm->gpc_lock);

		gpc_unmap(old_pfn, old_khva);
	}
}