Lines Matching +full:page +full:- +full:based

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
5 * This module enables kernel and guest-mode vCPU access to guest physical memory with suitable invalidation mechanisms.
30 spin_lock(&kvm->gpc_lock);
31 list_for_each_entry(gpc, &kvm->gpc_list, list) {
32 read_lock_irq(&gpc->lock);
34 /* Only a single page so no need to care about length */
35 if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
36 gpc->uhva >= start && gpc->uhva < end) {
37 read_unlock_irq(&gpc->lock);
47 write_lock_irq(&gpc->lock);
48 if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
49 gpc->uhva >= start && gpc->uhva < end)
50 gpc->valid = false;
51 write_unlock_irq(&gpc->lock);
55 read_unlock_irq(&gpc->lock);
57 spin_unlock(&kvm->gpc_lock);
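
The invalidation walker above uses a lock-upgrade idiom: the gpc_list is scanned under each cache's read lock, and only a cache whose uhva falls inside the invalidated [start, end) range is re-taken for writing. Since the read lock has to be dropped before the write lock can be acquired, the condition is re-checked under the write lock before gpc->valid is cleared. A minimal sketch of that idiom, with hypothetical obj/overlaps() names:

    read_lock_irq(&obj->lock);
    if (obj->valid && overlaps(obj, start, end)) {
            read_unlock_irq(&obj->lock);
            write_lock_irq(&obj->lock);

            /* State may have changed while no lock was held, so recheck. */
            if (obj->valid && overlaps(obj, start, end))
                    obj->valid = false;
            write_unlock_irq(&obj->lock);
    } else {
            read_unlock_irq(&obj->lock);
    }
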
67 * The cached access must fit within a single page. The 'len' argument
75 struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
77 if (!gpc->active)
81 * If the page was cached from a memslot, make sure the memslots have
82 * not been re-configured.
84 if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation)
87 if (kvm_is_error_hva(gpc->uhva))
90 if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
93 if (!gpc->valid)
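
The length test in kvm_gpc_check() enforces a single-page rule: the cached access may not cross a page boundary, so the offset of the access within its page plus 'len' must not exceed PAGE_SIZE. A sketch of what kvm_gpc_is_valid_len() is checking (the helper name below is illustrative, not the kernel's):

    static bool fits_in_one_page(gpa_t gpa, unsigned long uhva, unsigned long len)
    {
            /* HVA-only caches have no valid GPA; use whichever address is real. */
            unsigned long offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
                                                           offset_in_page(gpa);

            return offset + len <= PAGE_SIZE;
    }
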
113 /* Unmap the old pfn/page if it was mapped before. */
137 * is not protected by gpc->lock. It is guaranteed to
138 * be elevated before the mmu_notifier acquires gpc->lock, and
141 if (kvm->mn_active_invalidate_count)
148 * old (non-zero) value of mn_active_invalidate_count or the
152 return kvm->mmu_invalidate_seq != mmu_seq;
157 /* Note, the new page offset may be different than the old! */
158 void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
162 struct page *page;
165 .slot = gpc->memslot,
166 .gfn = gpa_to_gfn(gpc->gpa),
168 .hva = gpc->uhva,
169 .refcounted_page = &page,
172 lockdep_assert_held(&gpc->refresh_lock);
174 lockdep_assert_held_write(&gpc->lock);
177 * Invalidate the cache prior to dropping gpc->lock, the gpa=>uhva
181 gpc->valid = false;
184 mmu_seq = gpc->kvm->mmu_invalidate_seq;
187 write_unlock_irq(&gpc->lock);
204 kvm_release_page_unused(page);
216 * too must be done outside of gpc->lock!
218 if (new_pfn == gpc->pfn)
224 kvm_release_page_unused(page);
228 write_lock_irq(&gpc->lock);
234 WARN_ON_ONCE(gpc->valid);
235 } while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));
237 gpc->valid = true;
238 gpc->pfn = new_pfn;
239 gpc->khva = new_khva + offset_in_page(gpc->uhva);
242 * Put the reference to the _new_ page. The page is now tracked by the
246 kvm_release_page_clean(page);
251 write_lock_irq(&gpc->lock);
253 return -EFAULT;
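
The retry helper above follows KVM's usual pattern for racing against mmu_notifier invalidations: snapshot mmu_invalidate_seq, drop gpc->lock so the possibly-sleeping translation and mapping can proceed, then retake the lock and redo everything if an invalidation may have run in the meantime. A condensed sketch, where translate_and_map() is a hypothetical stand-in for the pfn lookup and kmap/memremap step:

    do {
            mmu_seq = gpc->kvm->mmu_invalidate_seq;
            smp_rmb();                      /* order the snapshot before the lookup */

            write_unlock_irq(&gpc->lock);   /* faulting in the page may sleep */
            new_pfn = translate_and_map(gpc->uhva, &new_khva);
            write_lock_irq(&gpc->lock);
    } while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));

    /* Publish only once no invalidation can have raced with the mapping. */
    gpc->valid = true;
    gpc->pfn = new_pfn;
    gpc->khva = new_khva + offset_in_page(gpc->uhva);
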
268 return -EINVAL;
270 lockdep_assert_held(&gpc->refresh_lock);
272 write_lock_irq(&gpc->lock);
274 if (!gpc->active) {
275 ret = -EINVAL;
279 old_pfn = gpc->pfn;
280 old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
281 old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);
286 gpc->gpa = INVALID_GPA;
287 gpc->memslot = NULL;
288 gpc->uhva = PAGE_ALIGN_DOWN(uhva);
290 if (gpc->uhva != old_uhva)
293 struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
297 if (gpc->gpa != gpa || gpc->generation != slots->generation ||
298 kvm_is_error_hva(gpc->uhva)) {
301 gpc->gpa = gpa;
302 gpc->generation = slots->generation;
303 gpc->memslot = __gfn_to_memslot(slots, gfn);
304 gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
306 if (kvm_is_error_hva(gpc->uhva)) {
307 ret = -EFAULT;
315 if (gpc->uhva != old_uhva)
318 gpc->uhva = old_uhva;
323 gpc->uhva += page_offset;
329 if (!gpc->valid || hva_change) {
334 * But do update gpc->khva because the offset within the page
337 gpc->khva = old_khva + page_offset;
349 gpc->valid = false;
350 gpc->pfn = KVM_PFN_ERR_FAULT;
351 gpc->khva = NULL;
355 unmap_old = (old_pfn != gpc->pfn);
358 write_unlock_irq(&gpc->lock);
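
The core of the refresh path above is deciding when a new pfn lookup is actually needed: only when the cache is currently invalid or the backing host virtual address changed. If the target merely moved within the same host page, the existing mapping is reused and only the offset is re-applied. Condensed, using the locals shown above (page_offset is the low bits of the gpa or uhva, e.g. 0x080 for an address ending in ...080):

    if (!gpc->valid || hva_change) {
            /* Slow path: re-translate and re-map via the retry loop excerpted above. */
            ret = hva_to_pfn_retry(gpc);
    } else {
            /* Fast path: same host page, only the offset within it moved. */
            gpc->khva = old_khva + page_offset;
    }
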
370 guard(mutex)(&gpc->refresh_lock);
372 if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
373 return -EINVAL;
376 * If the GPA is valid then ignore the HVA, as a cache can be GPA-based
377 * or HVA-based, not both. For GPA-based caches, the HVA will be
380 uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD;
382 return __kvm_gpc_refresh(gpc, gpc->gpa, uhva);
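
kvm_gpc_refresh() is the slow half of a check/refresh pairing: consumers take the read side of gpc->lock, validate the cache with kvm_gpc_check(), and drop the lock to refresh whenever the check fails (for instance after an mmu_notifier invalidation cleared gpc->valid). A minimal sketch of that read-side loop; struct guest_data and the surrounding function are hypothetical:

    struct guest_data data;
    unsigned long flags;

    read_lock_irqsave(&gpc->lock, flags);
    while (!kvm_gpc_check(gpc, sizeof(data))) {
            read_unlock_irqrestore(&gpc->lock, flags);

            if (kvm_gpc_refresh(gpc, sizeof(data)))
                    return -EFAULT;         /* cache could not be (re)mapped */

            read_lock_irqsave(&gpc->lock, flags);
    }

    memcpy(&data, gpc->khva, sizeof(data)); /* khva stays valid until unlock */
    read_unlock_irqrestore(&gpc->lock, flags);
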
387 rwlock_init(&gpc->lock);
388 mutex_init(&gpc->refresh_lock);
390 gpc->kvm = kvm;
391 gpc->pfn = KVM_PFN_ERR_FAULT;
392 gpc->gpa = INVALID_GPA;
393 gpc->uhva = KVM_HVA_ERR_BAD;
394 gpc->active = gpc->valid = false;
400 struct kvm *kvm = gpc->kvm;
403 return -EINVAL;
405 guard(mutex)(&gpc->refresh_lock);
407 if (!gpc->active) {
408 if (KVM_BUG_ON(gpc->valid, kvm))
409 return -EIO;
411 spin_lock(&kvm->gpc_lock);
412 list_add(&gpc->list, &kvm->gpc_list);
413 spin_unlock(&kvm->gpc_lock);
420 write_lock_irq(&gpc->lock);
421 gpc->active = true;
422 write_unlock_irq(&gpc->lock);
431 * by KVM to differentiate between GPA-based and HVA-based caches.
434 return -EINVAL;
442 return -EINVAL;
449 struct kvm *kvm = gpc->kvm;
453 guard(mutex)(&gpc->refresh_lock);
455 if (gpc->active) {
459 * until gpc->lock is dropped and refresh is guaranteed to fail.
461 write_lock_irq(&gpc->lock);
462 gpc->active = false;
463 gpc->valid = false;
471 old_khva = gpc->khva - offset_in_page(gpc->khva);
472 gpc->khva = NULL;
474 old_pfn = gpc->pfn;
475 gpc->pfn = KVM_PFN_ERR_FAULT;
476 write_unlock_irq(&gpc->lock);
478 spin_lock(&kvm->gpc_lock);
479 list_del(&gpc->list);
480 spin_unlock(&kvm->gpc_lock);
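
Taken together, these helpers form the cache's lifecycle: kvm_gpc_init() once up front, kvm_gpc_activate() (or kvm_gpc_activate_hva()) to map and validate a single page, kvm_gpc_check()/kvm_gpc_refresh() around each access, and kvm_gpc_deactivate() before teardown. A minimal sketch of a hypothetical user:

    struct gfn_to_pfn_cache cache;
    int ret;

    kvm_gpc_init(&cache, kvm);                         /* once, before first use */

    ret = kvm_gpc_activate(&cache, gpa, sizeof(u64));  /* map + validate one page */
    if (ret)
            return ret;

    /* ... access cache.khva under cache.lock, as sketched earlier ... */

    kvm_gpc_deactivate(&cache);                        /* unmap and unlink from kvm->gpc_list */
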