xref: /linux/virt/kvm/guest_memfd.c (revision 256e3417065b2721f77bcd37331796b59483ef3b)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/backing-dev.h>
3 #include <linux/falloc.h>
4 #include <linux/kvm_host.h>
5 #include <linux/pagemap.h>
6 #include <linux/anon_inodes.h>
7 
8 #include "kvm_mm.h"
9 
10 struct kvm_gmem {
11 	struct kvm *kvm;
12 	struct xarray bindings;
13 	struct list_head entry;
14 };
15 
16 /**
17  * folio_file_pfn - like folio_file_page, but return a pfn.
18  * @folio: The folio which contains this index.
19  * @index: The index we want to look up.
20  *
21  * Return: The pfn for this index.
22  */
23 static inline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index)
24 {
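	/*
	 * Folios span a power-of-two number of pages and are naturally
	 * aligned in the file, so masking @index with (nr_pages - 1) yields
	 * the page's offset within the folio.
	 */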
25 	return folio_pfn(folio) + (index & (folio_nr_pages(folio) - 1));
26 }
27 
28 static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
29 				    pgoff_t index, struct folio *folio)
30 {
31 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
32 	kvm_pfn_t pfn = folio_file_pfn(folio, index);
33 	gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff;
34 	int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));
35 	if (rc) {
36 		pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
37 				    index, gfn, pfn, rc);
38 		return rc;
39 	}
40 #endif
41 
42 	return 0;
43 }
44 
45 static inline void kvm_gmem_mark_prepared(struct folio *folio)
46 {
47 	folio_mark_uptodate(folio);
48 }
49 
50 /*
51  * Process @folio, which contains @gfn, so that the guest can use it.
52  * The folio must be locked and the gfn must be contained in @slot.
53  * On successful return the guest sees a zeroed page (so no host data is
54  * leaked) and the up-to-date flag is set.
55  */
56 static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
57 				  gfn_t gfn, struct folio *folio)
58 {
59 	unsigned long nr_pages, i;
60 	pgoff_t index;
61 	int r;
62 
63 	nr_pages = folio_nr_pages(folio);
64 	for (i = 0; i < nr_pages; i++)
65 		clear_highpage(folio_page(folio, i));
66 
67 	/*
68 	 * Preparing huge folios should always be safe, since it should
69 	 * be possible to split them later if needed.
70 	 *
71 	 * Right now the folio order is always going to be zero, but the
72 	 * code is ready for huge folios.  The only assumption is that
73 	 * the base pgoff of memslots is naturally aligned with the
74 	 * requested page order, ensuring that huge folios can also use
75 	 * huge page table entries for GPA->HPA mapping.
76 	 *
77 	 * The order will be passed when creating the guest_memfd, and
78 	 * checked when creating memslots.
79 	 */
80 	WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, 1 << folio_order(folio)));
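	/* Convert the gfn to a file index and align down to the folio start. */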
81 	index = gfn - slot->base_gfn + slot->gmem.pgoff;
82 	index = ALIGN_DOWN(index, 1 << folio_order(folio));
83 	r = __kvm_gmem_prepare_folio(kvm, slot, index, folio);
84 	if (!r)
85 		kvm_gmem_mark_prepared(folio);
86 
87 	return r;
88 }
89 
90 /*
91  * Returns a locked folio on success.  The caller is responsible for
92  * setting the up-to-date flag before the memory is mapped into the guest.
93  * There is no backing storage for the memory, so the folio will remain
94  * up-to-date until it's removed.
95  *
96  * Ignore accessed, referenced, and dirty flags.  The memory is
97  * unevictable and there is no storage to write back to.
98  */
99 static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
100 {
101 	/* TODO: Support huge pages. */
102 	return filemap_grab_folio(inode->i_mapping, index);
103 }
104 
105 static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
106 				      pgoff_t end)
107 {
108 	bool flush = false, found_memslot = false;
109 	struct kvm_memory_slot *slot;
110 	struct kvm *kvm = gmem->kvm;
111 	unsigned long index;
112 
113 	xa_for_each_range(&gmem->bindings, index, slot, start, end - 1) {
114 		pgoff_t pgoff = slot->gmem.pgoff;
115 
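		/*
		 * Clamp the zapped range to the intersection of [start, end)
		 * and this binding, converting file offsets back to gfns.
		 */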
116 		struct kvm_gfn_range gfn_range = {
117 			.start = slot->base_gfn + max(pgoff, start) - pgoff,
118 			.end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
119 			.slot = slot,
120 			.may_block = true,
121 			/* guest_memfd is only relevant to private mappings. */
122 			.attr_filter = KVM_FILTER_PRIVATE,
123 		};
124 
125 		if (!found_memslot) {
126 			found_memslot = true;
127 
128 			KVM_MMU_LOCK(kvm);
129 			kvm_mmu_invalidate_begin(kvm);
130 		}
131 
132 		flush |= kvm_mmu_unmap_gfn_range(kvm, &gfn_range);
133 	}
134 
135 	if (flush)
136 		kvm_flush_remote_tlbs(kvm);
137 
138 	if (found_memslot)
139 		KVM_MMU_UNLOCK(kvm);
140 }
141 
142 static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
143 				    pgoff_t end)
144 {
145 	struct kvm *kvm = gmem->kvm;
146 
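	/*
	 * Matches kvm_gmem_invalidate_begin(): the MMU invalidation sequence
	 * was only started if a binding intersects the range.
	 */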
147 	if (xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
148 		KVM_MMU_LOCK(kvm);
149 		kvm_mmu_invalidate_end(kvm);
150 		KVM_MMU_UNLOCK(kvm);
151 	}
152 }
153 
154 static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
155 {
156 	struct list_head *gmem_list = &inode->i_mapping->i_private_list;
157 	pgoff_t start = offset >> PAGE_SHIFT;
158 	pgoff_t end = (offset + len) >> PAGE_SHIFT;
159 	struct kvm_gmem *gmem;
160 
161 	/*
162 	 * Bindings must be stable across invalidation to ensure that the
163 	 * invalidate_begin() and invalidate_end() calls are balanced.
164 	 */
165 	filemap_invalidate_lock(inode->i_mapping);
166 
167 	list_for_each_entry(gmem, gmem_list, entry)
168 		kvm_gmem_invalidate_begin(gmem, start, end);
169 
170 	truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);
171 
172 	list_for_each_entry(gmem, gmem_list, entry)
173 		kvm_gmem_invalidate_end(gmem, start, end);
174 
175 	filemap_invalidate_unlock(inode->i_mapping);
176 
177 	return 0;
178 }
179 
180 static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
181 {
182 	struct address_space *mapping = inode->i_mapping;
183 	pgoff_t start, index, end;
184 	int r;
185 
186 	/* The guest_memfd size is fixed at creation; allocating beyond it is rejected. */
187 	if (offset + len > i_size_read(inode))
188 		return -EINVAL;
189 
190 	filemap_invalidate_lock_shared(mapping);
191 
192 	start = offset >> PAGE_SHIFT;
193 	end = (offset + len) >> PAGE_SHIFT;
194 
195 	r = 0;
196 	for (index = start; index < end; ) {
197 		struct folio *folio;
198 
199 		if (signal_pending(current)) {
200 			r = -EINTR;
201 			break;
202 		}
203 
204 		folio = kvm_gmem_get_folio(inode, index);
205 		if (IS_ERR(folio)) {
206 			r = PTR_ERR(folio);
207 			break;
208 		}
209 
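		/*
		 * Advance just past the folio we grabbed; folio_next_index()
		 * handles folios larger than one page.
		 */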
210 		index = folio_next_index(folio);
211 
212 		folio_unlock(folio);
213 		folio_put(folio);
214 
215 		/* 64-bit only, wrapping the index should be impossible. */
216 		if (WARN_ON_ONCE(!index))
217 			break;
218 
219 		cond_resched();
220 	}
221 
222 	filemap_invalidate_unlock_shared(mapping);
223 
224 	return r;
225 }
226 
227 static long kvm_gmem_fallocate(struct file *file, int mode, loff_t offset,
228 			       loff_t len)
229 {
230 	int ret;
231 
232 	if (!(mode & FALLOC_FL_KEEP_SIZE))
233 		return -EOPNOTSUPP;
234 
235 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
236 		return -EOPNOTSUPP;
237 
238 	if (!PAGE_ALIGNED(offset) || !PAGE_ALIGNED(len))
239 		return -EINVAL;
240 
241 	if (mode & FALLOC_FL_PUNCH_HOLE)
242 		ret = kvm_gmem_punch_hole(file_inode(file), offset, len);
243 	else
244 		ret = kvm_gmem_allocate(file_inode(file), offset, len);
245 
246 	if (!ret)
247 		file_modified(file);
248 	return ret;
249 }
250 
251 static int kvm_gmem_release(struct inode *inode, struct file *file)
252 {
253 	struct kvm_gmem *gmem = file->private_data;
254 	struct kvm_memory_slot *slot;
255 	struct kvm *kvm = gmem->kvm;
256 	unsigned long index;
257 
258 	/*
259 	 * Prevent concurrent attempts to *unbind* a memslot.  This is the last
260 	 * reference to the file and thus no new bindings can be created, but
261 	 * dereferencing the slot for existing bindings needs to be protected
262 	 * against memslot updates, specifically so that unbind doesn't race
263 	 * and free the memslot (kvm_gmem_get_file() will return NULL).
264 	 *
265 	 * Since .release is called only when the reference count is zero,
266 	 * after which file_ref_get() and get_file_active() fail,
267 	 * kvm_gmem_get_pfn() cannot be using the file concurrently.
268 	 * file_ref_put() provides a full barrier, and get_file_active() the
269 	 * matching acquire barrier.
270 	 */
271 	mutex_lock(&kvm->slots_lock);
272 
273 	filemap_invalidate_lock(inode->i_mapping);
274 
275 	xa_for_each(&gmem->bindings, index, slot)
276 		WRITE_ONCE(slot->gmem.file, NULL);
277 
278 	/*
279 	 * All in-flight operations are gone and new bindings can be created.
280 	 * Zap all SPTEs pointed at by this file.  Do not free the backing
281 	 * memory, as its lifetime is associated with the inode, not the file.
282 	 */
283 	kvm_gmem_invalidate_begin(gmem, 0, -1ul);
284 	kvm_gmem_invalidate_end(gmem, 0, -1ul);
285 
286 	list_del(&gmem->entry);
287 
288 	filemap_invalidate_unlock(inode->i_mapping);
289 
290 	mutex_unlock(&kvm->slots_lock);
291 
292 	xa_destroy(&gmem->bindings);
293 	kfree(gmem);
294 
295 	kvm_put_kvm(kvm);
296 
297 	return 0;
298 }
299 
300 static inline struct file *kvm_gmem_get_file(struct kvm_memory_slot *slot)
301 {
302 	/*
303 	 * Do not return slot->gmem.file if it has already been closed;
304 	 * there might be some time between the last fput() and when
305 	 * kvm_gmem_release() clears slot->gmem.file.
306 	 */
307 	return get_file_active(&slot->gmem.file);
308 }
309 
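/* Translate a gfn in @slot to the corresponding page index in the guest_memfd file. */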
310 static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn)
311 {
312 	return gfn - slot->base_gfn + slot->gmem.pgoff;
313 }
314 
315 static bool kvm_gmem_supports_mmap(struct inode *inode)
316 {
317 	const u64 flags = (u64)inode->i_private;
318 
319 	return flags & GUEST_MEMFD_FLAG_MMAP;
320 }
321 
322 static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf)
323 {
324 	struct inode *inode = file_inode(vmf->vma->vm_file);
325 	struct folio *folio;
326 	vm_fault_t ret = VM_FAULT_LOCKED;
327 
328 	if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
329 		return VM_FAULT_SIGBUS;
330 
331 	folio = kvm_gmem_get_folio(inode, vmf->pgoff);
332 	if (IS_ERR(folio)) {
333 		int err = PTR_ERR(folio);
334 
335 		if (err == -EAGAIN)
336 			return VM_FAULT_RETRY;
337 
338 		return vmf_error(err);
339 	}
340 
341 	if (WARN_ON_ONCE(folio_test_large(folio))) {
342 		ret = VM_FAULT_SIGBUS;
343 		goto out_folio;
344 	}
345 
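	/*
	 * Zero the page on first use so userspace never sees stale contents,
	 * and mark it prepared so kvm_gmem_get_pfn() won't clear it again.
	 */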
346 	if (!folio_test_uptodate(folio)) {
347 		clear_highpage(folio_page(folio, 0));
348 		kvm_gmem_mark_prepared(folio);
349 	}
350 
351 	vmf->page = folio_file_page(folio, vmf->pgoff);
352 
353 out_folio:
354 	if (ret != VM_FAULT_LOCKED) {
355 		folio_unlock(folio);
356 		folio_put(folio);
357 	}
358 
359 	return ret;
360 }
361 
362 static const struct vm_operations_struct kvm_gmem_vm_ops = {
363 	.fault = kvm_gmem_fault_user_mapping,
364 };
365 
366 static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma)
367 {
368 	if (!kvm_gmem_supports_mmap(file_inode(file)))
369 		return -ENODEV;
370 
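	/* Only shared mappings are supported; MAP_PRIVATE is rejected. */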
371 	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) !=
372 	    (VM_SHARED | VM_MAYSHARE)) {
373 		return -EINVAL;
374 	}
375 
376 	vma->vm_ops = &kvm_gmem_vm_ops;
377 
378 	return 0;
379 }
380 
381 static struct file_operations kvm_gmem_fops = {
382 	.mmap		= kvm_gmem_mmap,
383 	.open		= generic_file_open,
384 	.release	= kvm_gmem_release,
385 	.fallocate	= kvm_gmem_fallocate,
386 };
387 
388 void kvm_gmem_init(struct module *module)
389 {
390 	kvm_gmem_fops.owner = module;
391 }
392 
393 static int kvm_gmem_migrate_folio(struct address_space *mapping,
394 				  struct folio *dst, struct folio *src,
395 				  enum migrate_mode mode)
396 {
397 	WARN_ON_ONCE(1);
398 	return -EINVAL;
399 }
400 
401 static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *folio)
402 {
403 	struct list_head *gmem_list = &mapping->i_private_list;
404 	struct kvm_gmem *gmem;
405 	pgoff_t start, end;
406 
407 	filemap_invalidate_lock_shared(mapping);
408 
409 	start = folio->index;
410 	end = start + folio_nr_pages(folio);
411 
412 	list_for_each_entry(gmem, gmem_list, entry)
413 		kvm_gmem_invalidate_begin(gmem, start, end);
414 
415 	/*
416 	 * Do not truncate the range, what action is taken in response to the
417 	 * error is userspace's decision (assuming the architecture supports
418 	 * gracefully handling memory errors).  If/when the guest attempts to
419 	 * access a poisoned page, kvm_gmem_get_pfn() will return -EHWPOISON,
420 	 * at which point KVM can either terminate the VM or propagate the
421 	 * error to userspace.
422 	 */
423 
424 	list_for_each_entry(gmem, gmem_list, entry)
425 		kvm_gmem_invalidate_end(gmem, start, end);
426 
427 	filemap_invalidate_unlock_shared(mapping);
428 
429 	return MF_DELAYED;
430 }
431 
432 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
433 static void kvm_gmem_free_folio(struct folio *folio)
434 {
435 	struct page *page = folio_page(folio, 0);
436 	kvm_pfn_t pfn = page_to_pfn(page);
437 	int order = folio_order(folio);
438 
439 	kvm_arch_gmem_invalidate(pfn, pfn + (1ul << order));
440 }
441 #endif
442 
443 static const struct address_space_operations kvm_gmem_aops = {
444 	.dirty_folio = noop_dirty_folio,
445 	.migrate_folio	= kvm_gmem_migrate_folio,
446 	.error_remove_folio = kvm_gmem_error_folio,
447 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
448 	.free_folio = kvm_gmem_free_folio,
449 #endif
450 };
451 
452 static int kvm_gmem_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
453 			    struct iattr *attr)
454 {
455 	return -EINVAL;
456 }
457 static const struct inode_operations kvm_gmem_iops = {
458 	.setattr	= kvm_gmem_setattr,
459 };
460 
461 bool __weak kvm_arch_supports_gmem_mmap(struct kvm *kvm)
462 {
463 	return true;
464 }
465 
466 static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
467 {
468 	const char *anon_name = "[kvm-gmem]";
469 	struct kvm_gmem *gmem;
470 	struct inode *inode;
471 	struct file *file;
472 	int fd, err;
473 
474 	fd = get_unused_fd_flags(0);
475 	if (fd < 0)
476 		return fd;
477 
478 	gmem = kzalloc(sizeof(*gmem), GFP_KERNEL);
479 	if (!gmem) {
480 		err = -ENOMEM;
481 		goto err_fd;
482 	}
483 
484 	file = anon_inode_create_getfile(anon_name, &kvm_gmem_fops, gmem,
485 					 O_RDWR, NULL);
486 	if (IS_ERR(file)) {
487 		err = PTR_ERR(file);
488 		goto err_gmem;
489 	}
490 
491 	file->f_flags |= O_LARGEFILE;
492 
493 	inode = file->f_inode;
494 	WARN_ON(file->f_mapping != inode->i_mapping);
495 
496 	inode->i_private = (void *)(unsigned long)flags;
497 	inode->i_op = &kvm_gmem_iops;
498 	inode->i_mapping->a_ops = &kvm_gmem_aops;
499 	inode->i_mode |= S_IFREG;
500 	inode->i_size = size;
501 	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
502 	mapping_set_inaccessible(inode->i_mapping);
503 	/* Unmovable mappings are supposed to be marked unevictable as well. */
504 	WARN_ON_ONCE(!mapping_unevictable(inode->i_mapping));
505 
506 	kvm_get_kvm(kvm);
507 	gmem->kvm = kvm;
508 	xa_init(&gmem->bindings);
509 	list_add(&gmem->entry, &inode->i_mapping->i_private_list);
510 
511 	fd_install(fd, file);
512 	return fd;
513 
514 err_gmem:
515 	kfree(gmem);
516 err_fd:
517 	put_unused_fd(fd);
518 	return err;
519 }
520 
521 int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
522 {
523 	loff_t size = args->size;
524 	u64 flags = args->flags;
525 	u64 valid_flags = 0;
526 
527 	if (kvm_arch_supports_gmem_mmap(kvm))
528 		valid_flags |= GUEST_MEMFD_FLAG_MMAP;
529 
530 	if (flags & ~valid_flags)
531 		return -EINVAL;
532 
533 	if (size <= 0 || !PAGE_ALIGNED(size))
534 		return -EINVAL;
535 
536 	return __kvm_gmem_create(kvm, size, flags);
537 }
538 
539 int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
540 		  unsigned int fd, loff_t offset)
541 {
542 	loff_t size = slot->npages << PAGE_SHIFT;
543 	unsigned long start, end;
544 	struct kvm_gmem *gmem;
545 	struct inode *inode;
546 	struct file *file;
547 	int r = -EINVAL;
548 
549 	BUILD_BUG_ON(sizeof(gfn_t) != sizeof(slot->gmem.pgoff));
550 
551 	file = fget(fd);
552 	if (!file)
553 		return -EBADF;
554 
555 	if (file->f_op != &kvm_gmem_fops)
556 		goto err;
557 
558 	gmem = file->private_data;
559 	if (gmem->kvm != kvm)
560 		goto err;
561 
562 	inode = file_inode(file);
563 
564 	if (offset < 0 || !PAGE_ALIGNED(offset) ||
565 	    offset + size > i_size_read(inode))
566 		goto err;
567 
568 	filemap_invalidate_lock(inode->i_mapping);
569 
570 	start = offset >> PAGE_SHIFT;
571 	end = start + slot->npages;
572 
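	/* Reject the bind if any page in the range is already bound to a memslot. */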
573 	if (!xa_empty(&gmem->bindings) &&
574 	    xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
575 		filemap_invalidate_unlock(inode->i_mapping);
576 		goto err;
577 	}
578 
579 	/*
580 	 * Memslots with the KVM_MEM_GUEST_MEMFD flag are immutable, so
581 	 * kvm_gmem_bind() must occur on a new memslot.  Because the memslot
582 	 * is not visible yet, kvm_gmem_get_pfn() is guaranteed to see the file.
583 	 */
584 	WRITE_ONCE(slot->gmem.file, file);
585 	slot->gmem.pgoff = start;
586 	if (kvm_gmem_supports_mmap(inode))
587 		slot->flags |= KVM_MEMSLOT_GMEM_ONLY;
588 
589 	xa_store_range(&gmem->bindings, start, end - 1, slot, GFP_KERNEL);
590 	filemap_invalidate_unlock(inode->i_mapping);
591 
592 	/*
593 	 * Drop the reference to the file, even on success.  The file pins KVM,
594 	 * not the other way 'round.  Active bindings are invalidated if the
595 	 * file is closed before memslots are destroyed.
596 	 */
597 	r = 0;
598 err:
599 	fput(file);
600 	return r;
601 }
602 
603 void kvm_gmem_unbind(struct kvm_memory_slot *slot)
604 {
605 	unsigned long start = slot->gmem.pgoff;
606 	unsigned long end = start + slot->npages;
607 	struct kvm_gmem *gmem;
608 	struct file *file;
609 
610 	/*
611 	 * Nothing to do if the underlying file was already closed (or is being
612 	 * closed right now); kvm_gmem_release() invalidates all bindings.
613 	 */
614 	file = kvm_gmem_get_file(slot);
615 	if (!file)
616 		return;
617 
618 	gmem = file->private_data;
619 
620 	filemap_invalidate_lock(file->f_mapping);
621 	xa_store_range(&gmem->bindings, start, end - 1, NULL, GFP_KERNEL);
622 
623 	/*
624 	 * synchronize_srcu(&kvm->srcu) ensured that kvm_gmem_get_pfn()
625 	 * cannot see this memslot.
626 	 */
627 	WRITE_ONCE(slot->gmem.file, NULL);
628 	filemap_invalidate_unlock(file->f_mapping);
629 
630 	fput(file);
631 }
632 
633 /* Returns a locked folio on success.  */
634 static struct folio *__kvm_gmem_get_pfn(struct file *file,
635 					struct kvm_memory_slot *slot,
636 					pgoff_t index, kvm_pfn_t *pfn,
637 					bool *is_prepared, int *max_order)
638 {
639 	struct file *gmem_file = READ_ONCE(slot->gmem.file);
640 	struct kvm_gmem *gmem = file->private_data;
641 	struct folio *folio;
642 
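	/*
	 * The caller must pass the file currently bound to the memslot; a
	 * mismatch means the slot was unbound (gmem_file is NULL) or,
	 * unexpectedly, rebound to a different file.
	 */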
643 	if (file != gmem_file) {
644 		WARN_ON_ONCE(gmem_file);
645 		return ERR_PTR(-EFAULT);
646 	}
647 
648 	gmem = file->private_data;
649 	if (xa_load(&gmem->bindings, index) != slot) {
650 		WARN_ON_ONCE(xa_load(&gmem->bindings, index));
651 		return ERR_PTR(-EIO);
652 	}
653 
654 	folio = kvm_gmem_get_folio(file_inode(file), index);
655 	if (IS_ERR(folio))
656 		return folio;
657 
658 	if (folio_test_hwpoison(folio)) {
659 		folio_unlock(folio);
660 		folio_put(folio);
661 		return ERR_PTR(-EHWPOISON);
662 	}
663 
664 	*pfn = folio_file_pfn(folio, index);
665 	if (max_order)
666 		*max_order = 0;
667 
668 	*is_prepared = folio_test_uptodate(folio);
669 	return folio;
670 }
671 
672 int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
673 		     gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
674 		     int *max_order)
675 {
676 	pgoff_t index = kvm_gmem_get_index(slot, gfn);
677 	struct file *file = kvm_gmem_get_file(slot);
678 	struct folio *folio;
679 	bool is_prepared = false;
680 	int r = 0;
681 
682 	if (!file)
683 		return -EFAULT;
684 
685 	folio = __kvm_gmem_get_pfn(file, slot, index, pfn, &is_prepared, max_order);
686 	if (IS_ERR(folio)) {
687 		r = PTR_ERR(folio);
688 		goto out;
689 	}
690 
691 	if (!is_prepared)
692 		r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);
693 
694 	folio_unlock(folio);
695 
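	/*
	 * On success the folio's reference is transferred to the caller via
	 * @page; on failure it is dropped here.
	 */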
696 	if (!r)
697 		*page = folio_file_page(folio, index);
698 	else
699 		folio_put(folio);
700 
701 out:
702 	fput(file);
703 	return r;
704 }
705 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_get_pfn);
706 
707 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_POPULATE
708 long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
709 		       kvm_gmem_populate_cb post_populate, void *opaque)
710 {
711 	struct file *file;
712 	struct kvm_memory_slot *slot;
713 	void __user *p;
714 
715 	int ret = 0, max_order;
716 	long i;
717 
718 	lockdep_assert_held(&kvm->slots_lock);
719 
720 	if (WARN_ON_ONCE(npages <= 0))
721 		return -EINVAL;
722 
723 	slot = gfn_to_memslot(kvm, start_gfn);
724 	if (!kvm_slot_has_gmem(slot))
725 		return -EINVAL;
726 
727 	file = kvm_gmem_get_file(slot);
728 	if (!file)
729 		return -EFAULT;
730 
731 	filemap_invalidate_lock(file->f_mapping);
732 
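	/* Clamp @npages so the population loop stays within @slot. */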
733 	npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
734 	for (i = 0; i < npages; i += (1 << max_order)) {
735 		struct folio *folio;
736 		gfn_t gfn = start_gfn + i;
737 		pgoff_t index = kvm_gmem_get_index(slot, gfn);
738 		bool is_prepared = false;
739 		kvm_pfn_t pfn;
740 
741 		if (signal_pending(current)) {
742 			ret = -EINTR;
743 			break;
744 		}
745 
746 		folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, &max_order);
747 		if (IS_ERR(folio)) {
748 			ret = PTR_ERR(folio);
749 			break;
750 		}
751 
752 		if (is_prepared) {
753 			folio_unlock(folio);
754 			folio_put(folio);
755 			ret = -EEXIST;
756 			break;
757 		}
758 
759 		folio_unlock(folio);
760 		WARN_ON(!IS_ALIGNED(gfn, 1 << max_order) ||
761 			(npages - i) < (1 << max_order));
762 
763 		ret = -EINVAL;
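		/*
		 * Shrink the mapping order until the whole range is private;
		 * fail if even the single base page is not.
		 */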
764 		while (!kvm_range_has_memory_attributes(kvm, gfn, gfn + (1 << max_order),
765 							KVM_MEMORY_ATTRIBUTE_PRIVATE,
766 							KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
767 			if (!max_order)
768 				goto put_folio_and_exit;
769 			max_order--;
770 		}
771 
772 		p = src ? src + i * PAGE_SIZE : NULL;
773 		ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
774 		if (!ret)
775 			kvm_gmem_mark_prepared(folio);
776 
777 put_folio_and_exit:
778 		folio_put(folio);
779 		if (ret)
780 			break;
781 	}
782 
783 	filemap_invalidate_unlock(file->f_mapping);
784 
785 	fput(file);
786 	return ret && !i ? ret : i;
787 }
788 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_populate);
789 #endif
790