xref: /linux/drivers/iommu/iommufd/pages.c (revision ad9a728a3388dc5f66eab6b7135e0154249e9403)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
3  *
4  * The iopt_pages is the center of the storage and motion of PFNs. Each
5  * iopt_pages represents a logical linear array of full PFNs. The array is 0
6  * based and has npages in it. Accessors use 'index' to refer to the entry in
7  * this logical array, regardless of its storage location.
8  *
9  * PFNs are stored in a tiered scheme:
10  *  1) iopt_pages::pinned_pfns xarray
11  *  2) An iommu_domain
12  *  3) The origin of the PFNs, i.e. the userspace pointer
13  *
14  * PFNs have to be copied between all combinations of tiers, depending on the
15  * configuration.
16  *
17  * When a PFN is taken out of the userspace pointer it is pinned exactly once.
18  * The storage locations of the PFN's index are tracked in the two interval
19  * trees. If no interval includes the index then it is not pinned.
20  *
21  * If access_itree includes the PFN's index then an in-kernel access has
22  * requested the page. The PFN is stored in the xarray so other requestors can
23  * continue to find it.
24  *
25  * If the domains_itree includes the PFN's index then an iommu_domain is storing
26  * the PFN and it can be read back using iommu_iova_to_phys(). To avoid
27  * duplicating storage the xarray is not used if only iommu_domains are using
28  * the PFN's index.
29  *
30  * As a general principle this is designed so that destroy never fails. This
31  * means removing an iommu_domain or releasing an in-kernel access will not fail
32  * due to insufficient memory. In practice this means some cases have to hold
33  * PFNs in the xarray even though they are also being stored in an iommu_domain.
34  *
35  * While the iopt_pages can use an iommu_domain as storage, it does not have an
36  * IOVA itself. Instead the iopt_area represents a range of IOVA and uses the
37  * iopt_pages as the PFN provider. Multiple iopt_areas can share the iopt_pages
38  * and reference their own slice of the PFN array, with sub page granularity.
39  *
40  * In this file the term 'last' indicates an inclusive and closed interval, eg
41  * [0,0] refers to a single PFN. 'end' means an open range, eg [0,0) refers to
42  * no PFNs.
43  *
44  * Be cautious of overflow. An IOVA can go all the way up to U64_MAX, so
45  * last_iova + 1 can overflow. An iopt_pages index will always be much less than
46  * ULONG_MAX so last_index + 1 cannot overflow.
47  */
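
/*
 * A quick illustration of the 'last'/'end' convention above, using a made-up
 * range of three PFNs starting at index 5:
 *
 *	start = 5, npages = 3
 *	end   = start + npages     = 8	closed-open: [5, 8)
 *	last  = start + npages - 1 = 7	closed:      [5, 7]
 *
 * IOVA arithmetic therefore prefers 'last' values: when last_iova == U64_MAX,
 * last_iova + 1 wraps to 0, so lengths are computed as last - start + 1 and
 * user supplied additions go through check_add_overflow().
 */
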
48 #include <linux/dma-buf.h>
49 #include <linux/dma-resv.h>
50 #include <linux/file.h>
51 #include <linux/highmem.h>
52 #include <linux/iommu.h>
53 #include <linux/iommufd.h>
54 #include <linux/kthread.h>
55 #include <linux/overflow.h>
56 #include <linux/slab.h>
57 #include <linux/sched/mm.h>
58 #include <linux/vfio_pci_core.h>
59 
60 #include "double_span.h"
61 #include "io_pagetable.h"
62 
63 #ifndef CONFIG_IOMMUFD_TEST
64 #define TEMP_MEMORY_LIMIT 65536
65 #else
66 #define TEMP_MEMORY_LIMIT iommufd_test_memory_limit
67 #endif
68 #define BATCH_BACKUP_SIZE 32
69 
70 /*
71  * More memory makes pin_user_pages() and the batching more efficient, but as
72  * this is only a performance optimization don't try too hard to get it. A 64k
73  * allocation can hold about 26M of 4k pages and 13G of 2M pages in a
74  * pfn_batch. Various destroy paths cannot fail and provide a small amount of
75  * stack memory as a backup contingency. If backup_len is given this cannot
76  * fail.
77  */
78 static void *temp_kmalloc(size_t *size, void *backup, size_t backup_len)
79 {
80 	void *res;
81 
82 	if (WARN_ON(*size == 0))
83 		return NULL;
84 
85 	if (*size < backup_len)
86 		return backup;
87 
88 	if (!backup && iommufd_should_fail())
89 		return NULL;
90 
91 	*size = min_t(size_t, *size, TEMP_MEMORY_LIMIT);
92 	res = kmalloc(*size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
93 	if (res)
94 		return res;
95 	*size = PAGE_SIZE;
96 	if (backup_len) {
97 		res = kmalloc(*size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
98 		if (res)
99 			return res;
100 		*size = backup_len;
101 		return backup;
102 	}
103 	return kmalloc(*size, GFP_KERNEL);
104 }
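
/*
 * A minimal usage sketch for a destroy path, where an on-stack backup buffer
 * guarantees temp_kmalloc() cannot return NULL (the variable names below are
 * hypothetical, not helpers in this file):
 *
 *	unsigned long backup[BATCH_BACKUP_SIZE];
 *	size_t size = npfns * sizeof(unsigned long);
 *	void *mem = temp_kmalloc(&size, backup, sizeof(backup));
 *
 *	... use up to 'size' bytes of mem ...
 *	if (mem != backup)
 *		kfree(mem);
 */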
105 
106 void interval_tree_double_span_iter_update(
107 	struct interval_tree_double_span_iter *iter)
108 {
109 	unsigned long last_hole = ULONG_MAX;
110 	unsigned int i;
111 
112 	for (i = 0; i != ARRAY_SIZE(iter->spans); i++) {
113 		if (interval_tree_span_iter_done(&iter->spans[i])) {
114 			iter->is_used = -1;
115 			return;
116 		}
117 
118 		if (iter->spans[i].is_hole) {
119 			last_hole = min(last_hole, iter->spans[i].last_hole);
120 			continue;
121 		}
122 
123 		iter->is_used = i + 1;
124 		iter->start_used = iter->spans[i].start_used;
125 		iter->last_used = min(iter->spans[i].last_used, last_hole);
126 		return;
127 	}
128 
129 	iter->is_used = 0;
130 	iter->start_hole = iter->spans[0].start_hole;
131 	iter->last_hole =
132 		min(iter->spans[0].last_hole, iter->spans[1].last_hole);
133 }
134 
135 void interval_tree_double_span_iter_first(
136 	struct interval_tree_double_span_iter *iter,
137 	struct rb_root_cached *itree1, struct rb_root_cached *itree2,
138 	unsigned long first_index, unsigned long last_index)
139 {
140 	unsigned int i;
141 
142 	iter->itrees[0] = itree1;
143 	iter->itrees[1] = itree2;
144 	for (i = 0; i != ARRAY_SIZE(iter->spans); i++)
145 		interval_tree_span_iter_first(&iter->spans[i], iter->itrees[i],
146 					      first_index, last_index);
147 	interval_tree_double_span_iter_update(iter);
148 }
149 
150 void interval_tree_double_span_iter_next(
151 	struct interval_tree_double_span_iter *iter)
152 {
153 	unsigned int i;
154 
155 	if (iter->is_used == -1 ||
156 	    iter->last_hole == iter->spans[0].last_index) {
157 		iter->is_used = -1;
158 		return;
159 	}
160 
161 	for (i = 0; i != ARRAY_SIZE(iter->spans); i++)
162 		interval_tree_span_iter_advance(
163 			&iter->spans[i], iter->itrees[i], iter->last_hole + 1);
164 	interval_tree_double_span_iter_update(iter);
165 }
166 
167 static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages)
168 {
169 	int rc;
170 
171 	rc = check_add_overflow(pages->npinned, npages, &pages->npinned);
172 	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
173 		WARN_ON(rc || pages->npinned > pages->npages);
174 }
175 
176 static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages)
177 {
178 	int rc;
179 
180 	rc = check_sub_overflow(pages->npinned, npages, &pages->npinned);
181 	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
182 		WARN_ON(rc || pages->npinned > pages->npages);
183 }
184 
185 static void iopt_pages_err_unpin(struct iopt_pages *pages,
186 				 unsigned long start_index,
187 				 unsigned long last_index,
188 				 struct page **page_list)
189 {
190 	unsigned long npages = last_index - start_index + 1;
191 
192 	unpin_user_pages(page_list, npages);
193 	iopt_pages_sub_npinned(pages, npages);
194 }
195 
196 /*
197  * index is the number of PAGE_SIZE units from the start of the area's
198  * iopt_pages. If the iova is not aligned to PAGE_SIZE then the area's iova
199  * covers only a portion of the first and last pages in the range.
200  */
201 static unsigned long iopt_area_index_to_iova(struct iopt_area *area,
202 					     unsigned long index)
203 {
204 	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
205 		WARN_ON(index < iopt_area_index(area) ||
206 			index > iopt_area_last_index(area));
207 	index -= iopt_area_index(area);
208 	if (index == 0)
209 		return iopt_area_iova(area);
210 	return iopt_area_iova(area) - area->page_offset + index * PAGE_SIZE;
211 }
212 
213 static unsigned long iopt_area_index_to_iova_last(struct iopt_area *area,
214 						  unsigned long index)
215 {
216 	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
217 		WARN_ON(index < iopt_area_index(area) ||
218 			index > iopt_area_last_index(area));
219 	if (index == iopt_area_last_index(area))
220 		return iopt_area_last_iova(area);
221 	return iopt_area_iova(area) - area->page_offset +
222 	       (index - iopt_area_index(area) + 1) * PAGE_SIZE - 1;
223 }
224 
225 static void iommu_unmap_nofail(struct iommu_domain *domain, unsigned long iova,
226 			       size_t size)
227 {
228 	size_t ret;
229 
230 	ret = iommu_unmap(domain, iova, size);
231 	/*
232 	 * It is a logic error in this code or a driver bug if the IOMMU unmaps
233 	 * something other than exactly as requested. This implies that the
234 	 * iommu driver may not fail unmap for reasons beyond bad arguments.
235 	 * Particularly, the iommu driver may not do a memory allocation on the
236 	 * unmap path.
237 	 */
238 	WARN_ON(ret != size);
239 }
240 
241 static void iopt_area_unmap_domain_range(struct iopt_area *area,
242 					 struct iommu_domain *domain,
243 					 unsigned long start_index,
244 					 unsigned long last_index)
245 {
246 	unsigned long start_iova = iopt_area_index_to_iova(area, start_index);
247 
248 	iommu_unmap_nofail(domain, start_iova,
249 			   iopt_area_index_to_iova_last(area, last_index) -
250 				   start_iova + 1);
251 }
252 
253 static struct iopt_area *iopt_pages_find_domain_area(struct iopt_pages *pages,
254 						     unsigned long index)
255 {
256 	struct interval_tree_node *node;
257 
258 	node = interval_tree_iter_first(&pages->domains_itree, index, index);
259 	if (!node)
260 		return NULL;
261 	return container_of(node, struct iopt_area, pages_node);
262 }
263 
264 enum batch_kind {
265 	BATCH_CPU_MEMORY = 0,
266 	BATCH_MMIO,
267 };
268 
269 /*
270  * A simple data structure to hold a vector of PFNs, optimized for contiguous
271  * PFNs. This is used as a temporary holding memory for shuttling pfns from one
272  * place to another. Generally everything is made more efficient if operations
273  * work on the largest possible grouping of pfns. eg fewer lock/unlock cycles,
274  * better cache locality, etc
275  */
276 struct pfn_batch {
277 	unsigned long *pfns;
278 	u32 *npfns;
279 	unsigned int array_size;
280 	unsigned int end;
281 	unsigned int total_pfns;
282 	enum batch_kind kind;
283 };
284 enum { MAX_NPFNS = type_max(typeof(((struct pfn_batch *)0)->npfns[0])) };
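
/*
 * A small worked example of the encoding, assuming batch_add_pfn() (below) is
 * called with pfns 100, 101, 102 and then 200:
 *
 *	pfns[]  = { 100, 200 }
 *	npfns[] = {   3,   1 }
 *	end = 2, total_pfns = 4
 *
 * Contiguous runs collapse into one (pfn, npfn) pair, so a 2M huge page takes
 * a single entry instead of 512.
 */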
285 
286 static void batch_clear(struct pfn_batch *batch)
287 {
288 	batch->total_pfns = 0;
289 	batch->end = 0;
290 	batch->pfns[0] = 0;
291 	batch->npfns[0] = 0;
292 	batch->kind = 0;
293 }
294 
295 /*
296  * Carry means we carry a portion of the final hugepage over to the front of the
297  * batch
298  */
299 static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns)
300 {
301 	if (!keep_pfns)
302 		return batch_clear(batch);
303 
304 	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
305 		WARN_ON(!batch->end ||
306 			batch->npfns[batch->end - 1] < keep_pfns);
307 
308 	batch->total_pfns = keep_pfns;
309 	batch->pfns[0] = batch->pfns[batch->end - 1] +
310 			 (batch->npfns[batch->end - 1] - keep_pfns);
311 	batch->npfns[0] = keep_pfns;
312 	batch->end = 1;
313 }
314 
314 
315 static void batch_skip_carry(struct pfn_batch *batch, unsigned int skip_pfns)
316 {
317 	if (!batch->total_pfns)
318 		return;
319 	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
320 		WARN_ON(batch->total_pfns != batch->npfns[0]);
321 	skip_pfns = min(batch->total_pfns, skip_pfns);
322 	batch->pfns[0] += skip_pfns;
323 	batch->npfns[0] -= skip_pfns;
324 	batch->total_pfns -= skip_pfns;
325 }
326 
327 static int __batch_init(struct pfn_batch *batch, size_t max_pages, void *backup,
328 			size_t backup_len)
329 {
330 	const size_t elmsz = sizeof(*batch->pfns) + sizeof(*batch->npfns);
331 	size_t size = max_pages * elmsz;
332 
333 	batch->pfns = temp_kmalloc(&size, backup, backup_len);
334 	if (!batch->pfns)
335 		return -ENOMEM;
336 	if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && WARN_ON(size < elmsz))
337 		return -EINVAL;
338 	batch->array_size = size / elmsz;
339 	batch->npfns = (u32 *)(batch->pfns + batch->array_size);
340 	batch_clear(batch);
341 	return 0;
342 }
343 
344 static int batch_init(struct pfn_batch *batch, size_t max_pages)
345 {
346 	return __batch_init(batch, max_pages, NULL, 0);
347 }
348 
349 static void batch_init_backup(struct pfn_batch *batch, size_t max_pages,
350 			      void *backup, size_t backup_len)
351 {
352 	__batch_init(batch, max_pages, backup, backup_len);
353 }
354 
355 static void batch_destroy(struct pfn_batch *batch, void *backup)
356 {
357 	if (batch->pfns != backup)
358 		kfree(batch->pfns);
359 }
360 
361 static bool batch_add_pfn_num(struct pfn_batch *batch, unsigned long pfn,
362 			      u32 nr, enum batch_kind kind)
363 {
364 	unsigned int end = batch->end;
365 
366 	if (batch->kind != kind) {
367 		/* One kind per batch */
368 		if (batch->end != 0)
369 			return false;
370 		batch->kind = kind;
371 	}
372 
373 	if (end && pfn == batch->pfns[end - 1] + batch->npfns[end - 1] &&
374 	    nr <= MAX_NPFNS - batch->npfns[end - 1]) {
375 		batch->npfns[end - 1] += nr;
376 	} else if (end < batch->array_size) {
377 		batch->pfns[end] = pfn;
378 		batch->npfns[end] = nr;
379 		batch->end++;
380 	} else {
381 		return false;
382 	}
383 
384 	batch->total_pfns += nr;
385 	return true;
386 }
387 
388 static void batch_remove_pfn_num(struct pfn_batch *batch, unsigned long nr)
389 {
390 	batch->npfns[batch->end - 1] -= nr;
391 	if (batch->npfns[batch->end - 1] == 0)
392 		batch->end--;
393 	batch->total_pfns -= nr;
394 }
395 
396 /* true if the pfn was added, false otherwise */
397 static bool batch_add_pfn(struct pfn_batch *batch, unsigned long pfn)
398 {
399 	return batch_add_pfn_num(batch, pfn, 1, BATCH_CPU_MEMORY);
400 }
401 
402 /*
403  * Fill the batch with pfns from the domain. When the batch is full, or it
404  * reaches last_index, the function will return. The caller should use
405  * batch->total_pfns to determine the starting point for the next iteration.
406  */
407 static void batch_from_domain(struct pfn_batch *batch,
408 			      struct iommu_domain *domain,
409 			      struct iopt_area *area, unsigned long start_index,
410 			      unsigned long last_index)
411 {
412 	unsigned int page_offset = 0;
413 	unsigned long iova;
414 	phys_addr_t phys;
415 
416 	iova = iopt_area_index_to_iova(area, start_index);
417 	if (start_index == iopt_area_index(area))
418 		page_offset = area->page_offset;
419 	while (start_index <= last_index) {
420 		/*
421 		 * This is pretty slow; it would be nice to get the page size
422 		 * back from the driver, or have the driver directly fill the
423 		 * batch.
424 		 */
425 		phys = iommu_iova_to_phys(domain, iova) - page_offset;
426 		if (!batch_add_pfn(batch, PHYS_PFN(phys)))
427 			return;
428 		iova += PAGE_SIZE - page_offset;
429 		page_offset = 0;
430 		start_index++;
431 	}
432 }
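
/*
 * A sketch of the refill loop a caller is expected to run (hypothetical, not
 * a helper in this file):
 *
 *	while (start_index <= last_index) {
 *		batch_clear(batch);
 *		batch_from_domain(batch, domain, area, start_index, last_index);
 *		... consume batch->total_pfns pfns starting at start_index ...
 *		start_index += batch->total_pfns;
 *	}
 */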
433 
434 static struct page **raw_pages_from_domain(struct iommu_domain *domain,
435 					   struct iopt_area *area,
436 					   unsigned long start_index,
437 					   unsigned long last_index,
438 					   struct page **out_pages)
439 {
440 	unsigned int page_offset = 0;
441 	unsigned long iova;
442 	phys_addr_t phys;
443 
444 	iova = iopt_area_index_to_iova(area, start_index);
445 	if (start_index == iopt_area_index(area))
446 		page_offset = area->page_offset;
447 	while (start_index <= last_index) {
448 		phys = iommu_iova_to_phys(domain, iova) - page_offset;
449 		*(out_pages++) = pfn_to_page(PHYS_PFN(phys));
450 		iova += PAGE_SIZE - page_offset;
451 		page_offset = 0;
452 		start_index++;
453 	}
454 	return out_pages;
455 }
456 
457 /* Continues reading a domain until we reach a discontinuity in the pfns. */
458 static void batch_from_domain_continue(struct pfn_batch *batch,
459 				       struct iommu_domain *domain,
460 				       struct iopt_area *area,
461 				       unsigned long start_index,
462 				       unsigned long last_index)
463 {
464 	unsigned int array_size = batch->array_size;
465 
466 	batch->array_size = batch->end;
467 	batch_from_domain(batch, domain, area, start_index, last_index);
468 	batch->array_size = array_size;
469 }
470 
471 /*
472  * This is part of the VFIO compatibility support for VFIO_TYPE1_IOMMU. That
473  * mode permits splitting a mapped area up, and then one of the splits is
474  * unmapped. Doing this normally would cause us to violate our invariant of
475  * pairing map/unmap. Thus, to support old VFIO compatibility disable support
476  * for batching consecutive PFNs. All PFNs mapped into the iommu are done in
477  * PAGE_SIZE units, not larger or smaller.
478  */
479 static int batch_iommu_map_small(struct iommu_domain *domain,
480 				 unsigned long iova, phys_addr_t paddr,
481 				 size_t size, int prot)
482 {
483 	unsigned long start_iova = iova;
484 	int rc;
485 
486 	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
487 		WARN_ON(paddr % PAGE_SIZE || iova % PAGE_SIZE ||
488 			size % PAGE_SIZE);
489 
490 	while (size) {
491 		rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot,
492 			       GFP_KERNEL_ACCOUNT);
493 		if (rc)
494 			goto err_unmap;
495 		iova += PAGE_SIZE;
496 		paddr += PAGE_SIZE;
497 		size -= PAGE_SIZE;
498 	}
499 	return 0;
500 
501 err_unmap:
502 	if (start_iova != iova)
503 		iommu_unmap_nofail(domain, start_iova, iova - start_iova);
504 	return rc;
505 }
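
/*
 * For example, with disable_large_pages set a contiguous 2M run in the batch
 * becomes 512 individual PAGE_SIZE iommu_map() calls, so a later VFIO-style
 * partial unmap can cut on any page boundary. With large pages enabled the
 * same run is a single iommu_map() call that the driver may back with one 2M
 * IOPTE.
 */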
506 
507 static int batch_to_domain(struct pfn_batch *batch, struct iommu_domain *domain,
508 			   struct iopt_area *area, unsigned long start_index)
509 {
510 	bool disable_large_pages = area->iopt->disable_large_pages;
511 	unsigned long last_iova = iopt_area_last_iova(area);
512 	int iommu_prot = area->iommu_prot;
513 	unsigned int page_offset = 0;
514 	unsigned long start_iova;
515 	unsigned long next_iova;
516 	unsigned int cur = 0;
517 	unsigned long iova;
518 	int rc;
519 
520 	if (batch->kind == BATCH_MMIO) {
521 		iommu_prot &= ~IOMMU_CACHE;
522 		iommu_prot |= IOMMU_MMIO;
523 	}
524 
525 	/* The first index might be a partial page */
526 	if (start_index == iopt_area_index(area))
527 		page_offset = area->page_offset;
528 	next_iova = iova = start_iova =
529 		iopt_area_index_to_iova(area, start_index);
530 	while (cur < batch->end) {
531 		next_iova = min(last_iova + 1,
532 				next_iova + batch->npfns[cur] * PAGE_SIZE -
533 					page_offset);
534 		if (disable_large_pages)
535 			rc = batch_iommu_map_small(
536 				domain, iova,
537 				PFN_PHYS(batch->pfns[cur]) + page_offset,
538 				next_iova - iova, iommu_prot);
539 		else
540 			rc = iommu_map(domain, iova,
541 				       PFN_PHYS(batch->pfns[cur]) + page_offset,
542 				       next_iova - iova, iommu_prot,
543 				       GFP_KERNEL_ACCOUNT);
544 		if (rc)
545 			goto err_unmap;
546 		iova = next_iova;
547 		page_offset = 0;
548 		cur++;
549 	}
550 	return 0;
551 err_unmap:
552 	if (start_iova != iova)
553 		iommu_unmap_nofail(domain, start_iova, iova - start_iova);
554 	return rc;
555 }
556 
557 static void batch_from_xarray(struct pfn_batch *batch, struct xarray *xa,
558 			      unsigned long start_index,
559 			      unsigned long last_index)
560 {
561 	XA_STATE(xas, xa, start_index);
562 	void *entry;
563 
564 	rcu_read_lock();
565 	while (true) {
566 		entry = xas_next(&xas);
567 		if (xas_retry(&xas, entry))
568 			continue;
569 		WARN_ON(!xa_is_value(entry));
570 		if (!batch_add_pfn(batch, xa_to_value(entry)) ||
571 		    start_index == last_index)
572 			break;
573 		start_index++;
574 	}
575 	rcu_read_unlock();
576 }
577 
578 static void batch_from_xarray_clear(struct pfn_batch *batch, struct xarray *xa,
579 				    unsigned long start_index,
580 				    unsigned long last_index)
581 {
582 	XA_STATE(xas, xa, start_index);
583 	void *entry;
584 
585 	xas_lock(&xas);
586 	while (true) {
587 		entry = xas_next(&xas);
588 		if (xas_retry(&xas, entry))
589 			continue;
590 		WARN_ON(!xa_is_value(entry));
591 		if (!batch_add_pfn(batch, xa_to_value(entry)))
592 			break;
593 		xas_store(&xas, NULL);
594 		if (start_index == last_index)
595 			break;
596 		start_index++;
597 	}
598 	xas_unlock(&xas);
599 }
600 
601 static void clear_xarray(struct xarray *xa, unsigned long start_index,
602 			 unsigned long last_index)
603 {
604 	XA_STATE(xas, xa, start_index);
605 	void *entry;
606 
607 	xas_lock(&xas);
608 	xas_for_each(&xas, entry, last_index)
609 		xas_store(&xas, NULL);
610 	xas_unlock(&xas);
611 }
612 
613 static int pages_to_xarray(struct xarray *xa, unsigned long start_index,
614 			   unsigned long last_index, struct page **pages)
615 {
616 	struct page **end_pages = pages + (last_index - start_index) + 1;
617 	struct page **half_pages = pages + (end_pages - pages) / 2;
618 	XA_STATE(xas, xa, start_index);
619 
620 	do {
621 		void *old;
622 
623 		xas_lock(&xas);
624 		while (pages != end_pages) {
625 			/* xarray does not participate in fault injection */
626 			if (pages == half_pages && iommufd_should_fail()) {
627 				xas_set_err(&xas, -EINVAL);
628 				xas_unlock(&xas);
629 				/* aka xas_destroy() */
630 				xas_nomem(&xas, GFP_KERNEL);
631 				goto err_clear;
632 			}
633 
634 			old = xas_store(&xas, xa_mk_value(page_to_pfn(*pages)));
635 			if (xas_error(&xas))
636 				break;
637 			WARN_ON(old);
638 			pages++;
639 			xas_next(&xas);
640 		}
641 		xas_unlock(&xas);
642 	} while (xas_nomem(&xas, GFP_KERNEL));
643 
644 err_clear:
645 	if (xas_error(&xas)) {
646 		if (xas.xa_index != start_index)
647 			clear_xarray(xa, start_index, xas.xa_index - 1);
648 		return xas_error(&xas);
649 	}
650 	return 0;
651 }
652 
653 static void batch_from_pages(struct pfn_batch *batch, struct page **pages,
654 			     size_t npages)
655 {
656 	struct page **end = pages + npages;
657 
658 	for (; pages != end; pages++)
659 		if (!batch_add_pfn(batch, page_to_pfn(*pages)))
660 			break;
661 }
662 
663 static int batch_from_folios(struct pfn_batch *batch, struct folio ***folios_p,
664 			     unsigned long *offset_p, unsigned long npages)
665 {
666 	int rc = 0;
667 	struct folio **folios = *folios_p;
668 	unsigned long offset = *offset_p;
669 
670 	while (npages) {
671 		struct folio *folio = *folios;
672 		unsigned long nr = folio_nr_pages(folio) - offset;
673 		unsigned long pfn = page_to_pfn(folio_page(folio, offset));
674 
675 		nr = min(nr, npages);
676 		npages -= nr;
677 
678 		if (!batch_add_pfn_num(batch, pfn, nr, BATCH_CPU_MEMORY))
679 			break;
680 		if (nr > 1) {
681 			rc = folio_add_pins(folio, nr - 1);
682 			if (rc) {
683 				batch_remove_pfn_num(batch, nr);
684 				goto out;
685 			}
686 		}
687 
688 		folios++;
689 		offset = 0;
690 	}
691 
692 out:
693 	*folios_p = folios;
694 	*offset_p = offset;
695 	return rc;
696 }
697 
698 static void batch_unpin(struct pfn_batch *batch, struct iopt_pages *pages,
699 			unsigned int first_page_off, size_t npages)
700 {
701 	unsigned int cur = 0;
702 
703 	while (first_page_off) {
704 		if (batch->npfns[cur] > first_page_off)
705 			break;
706 		first_page_off -= batch->npfns[cur];
707 		cur++;
708 	}
709 
710 	while (npages) {
711 		size_t to_unpin = min_t(size_t, npages,
712 					batch->npfns[cur] - first_page_off);
713 
714 		unpin_user_page_range_dirty_lock(
715 			pfn_to_page(batch->pfns[cur] + first_page_off),
716 			to_unpin, pages->writable);
717 		iopt_pages_sub_npinned(pages, to_unpin);
718 		cur++;
719 		first_page_off = 0;
720 		npages -= to_unpin;
721 	}
722 }
723 
724 static void copy_data_page(struct page *page, void *data, unsigned long offset,
725 			   size_t length, unsigned int flags)
726 {
727 	void *mem;
728 
729 	mem = kmap_local_page(page);
730 	if (flags & IOMMUFD_ACCESS_RW_WRITE) {
731 		memcpy(mem + offset, data, length);
732 		set_page_dirty_lock(page);
733 	} else {
734 		memcpy(data, mem + offset, length);
735 	}
736 	kunmap_local(mem);
737 }
738 
739 static unsigned long batch_rw(struct pfn_batch *batch, void *data,
740 			      unsigned long offset, unsigned long length,
741 			      unsigned int flags)
742 {
743 	unsigned long copied = 0;
744 	unsigned int npage = 0;
745 	unsigned int cur = 0;
746 
747 	while (cur < batch->end) {
748 		unsigned long bytes = min(length, PAGE_SIZE - offset);
749 
750 		copy_data_page(pfn_to_page(batch->pfns[cur] + npage), data,
751 			       offset, bytes, flags);
752 		offset = 0;
753 		length -= bytes;
754 		data += bytes;
755 		copied += bytes;
756 		npage++;
757 		if (npage == batch->npfns[cur]) {
758 			npage = 0;
759 			cur++;
760 		}
761 		if (!length)
762 			break;
763 	}
764 	return copied;
765 }
766 
767 /* pfn_reader_user is just the pin_user_pages() path */
768 struct pfn_reader_user {
769 	struct page **upages;
770 	size_t upages_len;
771 	unsigned long upages_start;
772 	unsigned long upages_end;
773 	unsigned int gup_flags;
774 	/*
775 	 * 1 means mmget() and mmap_read_lock(), 0 means only mmget(), -1 is
776 	 * neither
777 	 */
778 	int locked;
779 
780 	/* The following are only valid if file != NULL. */
781 	struct file *file;
782 	struct folio **ufolios;
783 	size_t ufolios_len;
784 	unsigned long ufolios_offset;
785 	struct folio **ufolios_next;
786 };
787 
788 static void pfn_reader_user_init(struct pfn_reader_user *user,
789 				 struct iopt_pages *pages)
790 {
791 	user->upages = NULL;
792 	user->upages_len = 0;
793 	user->upages_start = 0;
794 	user->upages_end = 0;
795 	user->locked = -1;
796 	user->gup_flags = FOLL_LONGTERM;
797 	if (pages->writable)
798 		user->gup_flags |= FOLL_WRITE;
799 
800 	user->file = (pages->type == IOPT_ADDRESS_FILE) ? pages->file : NULL;
801 	user->ufolios = NULL;
802 	user->ufolios_len = 0;
803 	user->ufolios_next = NULL;
804 	user->ufolios_offset = 0;
805 }
806 
807 static void pfn_reader_user_destroy(struct pfn_reader_user *user,
808 				    struct iopt_pages *pages)
809 {
810 	if (user->locked != -1) {
811 		if (user->locked)
812 			mmap_read_unlock(pages->source_mm);
813 		if (!user->file && pages->source_mm != current->mm)
814 			mmput(pages->source_mm);
815 		user->locked = -1;
816 	}
817 
818 	kfree(user->upages);
819 	user->upages = NULL;
820 	kfree(user->ufolios);
821 	user->ufolios = NULL;
822 }
823 
824 static long pin_memfd_pages(struct pfn_reader_user *user, unsigned long start,
825 			    unsigned long npages)
826 {
827 	unsigned long i;
828 	unsigned long offset;
829 	unsigned long npages_out = 0;
830 	struct page **upages = user->upages;
831 	unsigned long end = start + (npages << PAGE_SHIFT) - 1;
832 	long nfolios = user->ufolios_len / sizeof(*user->ufolios);
833 
834 	/*
835 	 * todo: memfd_pin_folios should return the last pinned offset so
836 	 * we can compute npages pinned, and avoid looping over folios here
837 	 * if upages == NULL.
838 	 */
839 	nfolios = memfd_pin_folios(user->file, start, end, user->ufolios,
840 				   nfolios, &offset);
841 	if (nfolios <= 0)
842 		return nfolios;
843 
844 	offset >>= PAGE_SHIFT;
845 	user->ufolios_next = user->ufolios;
846 	user->ufolios_offset = offset;
847 
848 	for (i = 0; i < nfolios; i++) {
849 		struct folio *folio = user->ufolios[i];
850 		unsigned long nr = folio_nr_pages(folio);
851 		unsigned long npin = min(nr - offset, npages);
852 
853 		npages -= npin;
854 		npages_out += npin;
855 
856 		if (upages) {
857 			if (npin == 1) {
858 				*upages++ = folio_page(folio, offset);
859 			} else {
860 				int rc = folio_add_pins(folio, npin - 1);
861 
862 				if (rc)
863 					return rc;
864 
865 				while (npin--)
866 					*upages++ = folio_page(folio, offset++);
867 			}
868 		}
869 
870 		offset = 0;
871 	}
872 
873 	return npages_out;
874 }
875 
876 static int pfn_reader_user_pin(struct pfn_reader_user *user,
877 			       struct iopt_pages *pages,
878 			       unsigned long start_index,
879 			       unsigned long last_index)
880 {
881 	bool remote_mm = pages->source_mm != current->mm;
882 	unsigned long npages = last_index - start_index + 1;
883 	unsigned long start;
884 	unsigned long unum;
885 	uintptr_t uptr;
886 	long rc;
887 
888 	if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
889 	    WARN_ON(last_index < start_index))
890 		return -EINVAL;
891 
892 	if (!user->file && !user->upages) {
893 		/* All undone in pfn_reader_destroy() */
894 		user->upages_len = npages * sizeof(*user->upages);
895 		user->upages = temp_kmalloc(&user->upages_len, NULL, 0);
896 		if (!user->upages)
897 			return -ENOMEM;
898 	}
899 
900 	if (user->file && !user->ufolios) {
901 		user->ufolios_len = npages * sizeof(*user->ufolios);
902 		user->ufolios = temp_kmalloc(&user->ufolios_len, NULL, 0);
903 		if (!user->ufolios)
904 			return -ENOMEM;
905 	}
906 
907 	if (user->locked == -1) {
908 		/*
909 		 * The majority of usages will run the map task within the mm
910 		 * providing the pages, so we can optimize into
911 		 * get_user_pages_fast()
912 		 */
913 		if (!user->file && remote_mm) {
914 			if (!mmget_not_zero(pages->source_mm))
915 				return -EFAULT;
916 		}
917 		user->locked = 0;
918 	}
919 
920 	unum = user->file ? user->ufolios_len / sizeof(*user->ufolios) :
921 			    user->upages_len / sizeof(*user->upages);
922 	npages = min_t(unsigned long, npages, unum);
923 
924 	if (iommufd_should_fail())
925 		return -EFAULT;
926 
927 	if (user->file) {
928 		start = pages->start + (start_index * PAGE_SIZE);
929 		rc = pin_memfd_pages(user, start, npages);
930 	} else if (!remote_mm) {
931 		uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE);
932 		rc = pin_user_pages_fast(uptr, npages, user->gup_flags,
933 					 user->upages);
934 	} else {
935 		uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE);
936 		if (!user->locked) {
937 			mmap_read_lock(pages->source_mm);
938 			user->locked = 1;
939 		}
940 		rc = pin_user_pages_remote(pages->source_mm, uptr, npages,
941 					   user->gup_flags, user->upages,
942 					   &user->locked);
943 	}
944 	if (rc <= 0) {
945 		if (WARN_ON(!rc))
946 			return -EFAULT;
947 		return rc;
948 	}
949 	iopt_pages_add_npinned(pages, rc);
950 	user->upages_start = start_index;
951 	user->upages_end = start_index + rc;
952 	return 0;
953 }
954 
955 /* This is the "modern" and faster accounting method used by io_uring */
956 static int incr_user_locked_vm(struct iopt_pages *pages, unsigned long npages)
957 {
958 	unsigned long lock_limit;
959 	unsigned long cur_pages;
960 	unsigned long new_pages;
961 
962 	lock_limit = task_rlimit(pages->source_task, RLIMIT_MEMLOCK) >>
963 		     PAGE_SHIFT;
964 
965 	cur_pages = atomic_long_read(&pages->source_user->locked_vm);
966 	do {
967 		new_pages = cur_pages + npages;
968 		if (new_pages > lock_limit)
969 			return -ENOMEM;
970 	} while (!atomic_long_try_cmpxchg(&pages->source_user->locked_vm,
971 					  &cur_pages, new_pages));
972 	return 0;
973 }
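
/*
 * Worked example: with a 64M RLIMIT_MEMLOCK and 4k pages the limit is
 * 64M >> PAGE_SHIFT = 16384 pages. A pin is refused once locked_vm + npages
 * would exceed that, and the try_cmpxchg loop makes the check and the
 * increment atomic against concurrent pinners without taking a lock.
 */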
974 
975 static void decr_user_locked_vm(struct iopt_pages *pages, unsigned long npages)
976 {
977 	if (WARN_ON(atomic_long_read(&pages->source_user->locked_vm) < npages))
978 		return;
979 	atomic_long_sub(npages, &pages->source_user->locked_vm);
980 }
981 
982 /* This is the accounting method used for compatibility with VFIO */
983 static int update_mm_locked_vm(struct iopt_pages *pages, unsigned long npages,
984 			       bool inc, struct pfn_reader_user *user)
985 {
986 	bool do_put = false;
987 	int rc;
988 
989 	if (user && user->locked) {
990 		mmap_read_unlock(pages->source_mm);
991 		user->locked = 0;
992 		/* If we had the lock then we also have a get */
993 
994 	} else if ((!user || (!user->upages && !user->ufolios)) &&
995 		   pages->source_mm != current->mm) {
996 		if (!mmget_not_zero(pages->source_mm))
997 			return -EINVAL;
998 		do_put = true;
999 	}
1000 
1001 	mmap_write_lock(pages->source_mm);
1002 	rc = __account_locked_vm(pages->source_mm, npages, inc,
1003 				 pages->source_task, false);
1004 	mmap_write_unlock(pages->source_mm);
1005 
1006 	if (do_put)
1007 		mmput(pages->source_mm);
1008 	return rc;
1009 }
1010 
1011 int iopt_pages_update_pinned(struct iopt_pages *pages, unsigned long npages,
1012 			     bool inc, struct pfn_reader_user *user)
1013 {
1014 	int rc = 0;
1015 
1016 	switch (pages->account_mode) {
1017 	case IOPT_PAGES_ACCOUNT_NONE:
1018 		break;
1019 	case IOPT_PAGES_ACCOUNT_USER:
1020 		if (inc)
1021 			rc = incr_user_locked_vm(pages, npages);
1022 		else
1023 			decr_user_locked_vm(pages, npages);
1024 		break;
1025 	case IOPT_PAGES_ACCOUNT_MM:
1026 		rc = update_mm_locked_vm(pages, npages, inc, user);
1027 		break;
1028 	}
1029 	if (rc)
1030 		return rc;
1031 
1032 	pages->last_npinned = pages->npinned;
1033 	if (inc)
1034 		atomic64_add(npages, &pages->source_mm->pinned_vm);
1035 	else
1036 		atomic64_sub(npages, &pages->source_mm->pinned_vm);
1037 	return 0;
1038 }
1039 
1040 static void update_unpinned(struct iopt_pages *pages)
1041 {
1042 	if (WARN_ON(pages->npinned > pages->last_npinned))
1043 		return;
1044 	if (pages->npinned == pages->last_npinned)
1045 		return;
1046 	iopt_pages_update_pinned(pages, pages->last_npinned - pages->npinned,
1047 				 false, NULL);
1048 }
1049 
1050 /*
1051  * Changes in the number of pages pinned are done after the pages have been read
1052  * and processed. If the user lacked sufficient limit then the error unwind will unpin
1053  * everything that was just pinned. This is because it is expensive to calculate
1054  * how many pages we have already pinned within a range to generate an accurate
1055  * prediction in advance of doing the work to actually pin them.
1056  */
1057 static int pfn_reader_user_update_pinned(struct pfn_reader_user *user,
1058 					 struct iopt_pages *pages)
1059 {
1060 	unsigned long npages;
1061 	bool inc;
1062 
1063 	lockdep_assert_held(&pages->mutex);
1064 
1065 	if (pages->npinned == pages->last_npinned)
1066 		return 0;
1067 
1068 	if (pages->npinned < pages->last_npinned) {
1069 		npages = pages->last_npinned - pages->npinned;
1070 		inc = false;
1071 	} else {
1072 		if (iommufd_should_fail())
1073 			return -ENOMEM;
1074 		npages = pages->npinned - pages->last_npinned;
1075 		inc = true;
1076 	}
1077 	return iopt_pages_update_pinned(pages, npages, inc, user);
1078 }
1079 
1080 struct pfn_reader_dmabuf {
1081 	struct dma_buf_phys_vec phys;
1082 	unsigned long start_offset;
1083 };
1084 
1085 static int pfn_reader_dmabuf_init(struct pfn_reader_dmabuf *dmabuf,
1086 				  struct iopt_pages *pages)
1087 {
1088 	/* Callers must not get here if the dmabuf was already revoked */
1089 	if (WARN_ON(iopt_dmabuf_revoked(pages)))
1090 		return -EINVAL;
1091 
1092 	dmabuf->phys = pages->dmabuf.phys;
1093 	dmabuf->start_offset = pages->dmabuf.start;
1094 	return 0;
1095 }
1096 
1097 static int pfn_reader_fill_dmabuf(struct pfn_reader_dmabuf *dmabuf,
1098 				  struct pfn_batch *batch,
1099 				  unsigned long start_index,
1100 				  unsigned long last_index)
1101 {
1102 	unsigned long start = dmabuf->start_offset + start_index * PAGE_SIZE;
1103 
1104 	/*
1105 	 * start_index/last_index and start are all PAGE_SIZE aligned; the batch is
1106 	 * always filled using page size aligned PFNs just like the other types.
1107 	 * If the dmabuf has been sliced on a sub page offset then the common
1108 	 * batch to domain code will adjust it before mapping to the domain.
1109 	 */
1110 	batch_add_pfn_num(batch, PHYS_PFN(dmabuf->phys.paddr + start),
1111 			  last_index - start_index + 1, BATCH_MMIO);
1112 	return 0;
1113 }
1114 
1115 /*
1116  * PFNs are stored in three places, in order of preference:
1117  * - The iopt_pages xarray. This is only populated if there is an
1118  *   iopt_pages_access
1119  * - The iommu_domain under an area
1120  * - The original PFN source, ie pages->source_mm
1121  *
1122  * This iterator reads the pfns optimizing to load according to the
1123  * above order.
1124  */
1125 struct pfn_reader {
1126 	struct iopt_pages *pages;
1127 	struct interval_tree_double_span_iter span;
1128 	struct pfn_batch batch;
1129 	unsigned long batch_start_index;
1130 	unsigned long batch_end_index;
1131 	unsigned long last_index;
1132 
1133 	union {
1134 		struct pfn_reader_user user;
1135 		struct pfn_reader_dmabuf dmabuf;
1136 	};
1137 };
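
/*
 * A sketch of the typical consumer loop (simplified, error unwind omitted):
 *
 *	rc = pfn_reader_first(&pfns, pages, start_index, last_index);
 *	if (rc)
 *		return rc;
 *	while (!pfn_reader_done(&pfns)) {
 *		... map pfns.batch starting at pfns.batch_start_index ...
 *		rc = pfn_reader_next(&pfns);
 *		if (rc)
 *			break;
 *	}
 *	if (!rc)
 *		rc = pfn_reader_update_pinned(&pfns);
 *	pfn_reader_destroy(&pfns);
 */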
1138 
1139 static int pfn_reader_update_pinned(struct pfn_reader *pfns)
1140 {
1141 	return pfn_reader_user_update_pinned(&pfns->user, pfns->pages);
1142 }
1143 
1144 /*
1145  * The batch can contain a mixture of pages that are still in use and pages that
1146  * need to be unpinned. Unpin only pages that are not held anywhere else.
1147  */
1148 static void pfn_reader_unpin(struct pfn_reader *pfns)
1149 {
1150 	unsigned long last = pfns->batch_end_index - 1;
1151 	unsigned long start = pfns->batch_start_index;
1152 	struct interval_tree_double_span_iter span;
1153 	struct iopt_pages *pages = pfns->pages;
1154 
1155 	lockdep_assert_held(&pages->mutex);
1156 
1157 	interval_tree_for_each_double_span(&span, &pages->access_itree,
1158 					   &pages->domains_itree, start, last) {
1159 		if (span.is_used)
1160 			continue;
1161 
1162 		batch_unpin(&pfns->batch, pages, span.start_hole - start,
1163 			    span.last_hole - span.start_hole + 1);
1164 	}
1165 }
1166 
1167 /* Process a single span to load it from the proper storage */
1168 static int pfn_reader_fill_span(struct pfn_reader *pfns)
1169 {
1170 	struct interval_tree_double_span_iter *span = &pfns->span;
1171 	unsigned long start_index = pfns->batch_end_index;
1172 	struct pfn_reader_user *user;
1173 	unsigned long npages;
1174 	struct iopt_area *area;
1175 	int rc;
1176 
1177 	if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
1178 	    WARN_ON(span->last_used < start_index))
1179 		return -EINVAL;
1180 
1181 	if (span->is_used == 1) {
1182 		batch_from_xarray(&pfns->batch, &pfns->pages->pinned_pfns,
1183 				  start_index, span->last_used);
1184 		return 0;
1185 	}
1186 
1187 	if (span->is_used == 2) {
1188 		/*
1189 		 * Pull as many pages from the first domain we find in the
1190 		 * target span. If it is too small then we will be called again
1191 		 * and we'll find another area.
1192 		 */
1193 		area = iopt_pages_find_domain_area(pfns->pages, start_index);
1194 		if (WARN_ON(!area))
1195 			return -EINVAL;
1196 
1197 		/* The storage_domain cannot change without the pages mutex */
1198 		batch_from_domain(
1199 			&pfns->batch, area->storage_domain, area, start_index,
1200 			min(iopt_area_last_index(area), span->last_used));
1201 		return 0;
1202 	}
1203 
1204 	if (iopt_is_dmabuf(pfns->pages))
1205 		return pfn_reader_fill_dmabuf(&pfns->dmabuf, &pfns->batch,
1206 					      start_index, span->last_hole);
1207 
1208 	user = &pfns->user;
1209 	if (start_index >= user->upages_end) {
1210 		rc = pfn_reader_user_pin(user, pfns->pages, start_index,
1211 					 span->last_hole);
1212 		if (rc)
1213 			return rc;
1214 	}
1215 
1216 	npages = user->upages_end - start_index;
1217 	start_index -= user->upages_start;
1218 	rc = 0;
1219 
1220 	if (!user->file)
1221 		batch_from_pages(&pfns->batch, user->upages + start_index,
1222 				 npages);
1223 	else
1224 		rc = batch_from_folios(&pfns->batch, &user->ufolios_next,
1225 				       &user->ufolios_offset, npages);
1226 	return rc;
1227 }
1228 
1229 static bool pfn_reader_done(struct pfn_reader *pfns)
1230 {
1231 	return pfns->batch_start_index == pfns->last_index + 1;
1232 }
1233 
1234 static int pfn_reader_next(struct pfn_reader *pfns)
1235 {
1236 	int rc;
1237 
1238 	batch_clear(&pfns->batch);
1239 	pfns->batch_start_index = pfns->batch_end_index;
1240 
1241 	while (pfns->batch_end_index != pfns->last_index + 1) {
1242 		unsigned int npfns = pfns->batch.total_pfns;
1243 
1244 		if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
1245 		    WARN_ON(interval_tree_double_span_iter_done(&pfns->span)))
1246 			return -EINVAL;
1247 
1248 		rc = pfn_reader_fill_span(pfns);
1249 		if (rc)
1250 			return rc;
1251 
1252 		if (WARN_ON(!pfns->batch.total_pfns))
1253 			return -EINVAL;
1254 
1255 		pfns->batch_end_index =
1256 			pfns->batch_start_index + pfns->batch.total_pfns;
1257 		if (pfns->batch_end_index == pfns->span.last_used + 1)
1258 			interval_tree_double_span_iter_next(&pfns->span);
1259 
1260 		/* Batch is full */
1261 		if (npfns == pfns->batch.total_pfns)
1262 			return 0;
1263 	}
1264 	return 0;
1265 }
1266 
1267 static int pfn_reader_init(struct pfn_reader *pfns, struct iopt_pages *pages,
1268 			   unsigned long start_index, unsigned long last_index)
1269 {
1270 	int rc;
1271 
1272 	lockdep_assert_held(&pages->mutex);
1273 
1274 	pfns->pages = pages;
1275 	pfns->batch_start_index = start_index;
1276 	pfns->batch_end_index = start_index;
1277 	pfns->last_index = last_index;
1278 	if (iopt_is_dmabuf(pages))
1279 		pfn_reader_dmabuf_init(&pfns->dmabuf, pages);
1280 	else
1281 		pfn_reader_user_init(&pfns->user, pages);
1282 	rc = batch_init(&pfns->batch, last_index - start_index + 1);
1283 	if (rc)
1284 		return rc;
1285 	interval_tree_double_span_iter_first(&pfns->span, &pages->access_itree,
1286 					     &pages->domains_itree, start_index,
1287 					     last_index);
1288 	return 0;
1289 }
1290 
1291 /*
1292  * There are many assertions regarding the state of pages->npinned vs
1293  * pages->last_npinned, for instance something like unmapping a domain must only
1294  * decrement the npinned, and pfn_reader_destroy() must be called only after all
1295  * the pins are updated. This is fine for success flows, but error flows
1296  * sometimes need to release the pins held inside the pfn_reader before going on
1297  * to complete unmapping and releasing pins held in domains.
1298  */
1299 static void pfn_reader_release_pins(struct pfn_reader *pfns)
1300 {
1301 	struct iopt_pages *pages = pfns->pages;
1302 	struct pfn_reader_user *user;
1303 
1304 	if (iopt_is_dmabuf(pages))
1305 		return;
1306 
1307 	user = &pfns->user;
1308 	if (user->upages_end > pfns->batch_end_index) {
1309 		/* Any pages not transferred to the batch are just unpinned */
1310 
1311 		unsigned long npages = user->upages_end - pfns->batch_end_index;
1312 		unsigned long start_index = pfns->batch_end_index -
1313 					    user->upages_start;
1314 
1315 		if (!user->file) {
1316 			unpin_user_pages(user->upages + start_index, npages);
1317 		} else {
1318 			long n = user->ufolios_len / sizeof(*user->ufolios);
1319 
1320 			unpin_folios(user->ufolios_next,
1321 				     user->ufolios + n - user->ufolios_next);
1322 		}
1323 		iopt_pages_sub_npinned(pages, npages);
1324 		user->upages_end = pfns->batch_end_index;
1325 	}
1326 	if (pfns->batch_start_index != pfns->batch_end_index) {
1327 		pfn_reader_unpin(pfns);
1328 		pfns->batch_start_index = pfns->batch_end_index;
1329 	}
1330 }
1331 
1332 static void pfn_reader_destroy(struct pfn_reader *pfns)
1333 {
1334 	struct iopt_pages *pages = pfns->pages;
1335 
1336 	pfn_reader_release_pins(pfns);
1337 	if (!iopt_is_dmabuf(pfns->pages))
1338 		pfn_reader_user_destroy(&pfns->user, pfns->pages);
1339 	batch_destroy(&pfns->batch, NULL);
1340 	WARN_ON(pages->last_npinned != pages->npinned);
1341 }
1342 
1343 static int pfn_reader_first(struct pfn_reader *pfns, struct iopt_pages *pages,
1344 			    unsigned long start_index, unsigned long last_index)
1345 {
1346 	int rc;
1347 
1348 	if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
1349 	    WARN_ON(last_index < start_index))
1350 		return -EINVAL;
1351 
1352 	rc = pfn_reader_init(pfns, pages, start_index, last_index);
1353 	if (rc)
1354 		return rc;
1355 	rc = pfn_reader_next(pfns);
1356 	if (rc) {
1357 		pfn_reader_destroy(pfns);
1358 		return rc;
1359 	}
1360 	return 0;
1361 }
1362 
1363 static struct iopt_pages *iopt_alloc_pages(unsigned long start_byte,
1364 					   unsigned long length, bool writable)
1365 {
1366 	struct iopt_pages *pages;
1367 
1368 	/*
1369 	 * The iommu API uses size_t as the length; check it here to also protect the
1370 	 * DIV_ROUND_UP below from overflow
1371 	 */
1372 	if (length > SIZE_MAX - PAGE_SIZE || length == 0)
1373 		return ERR_PTR(-EINVAL);
1374 
1375 	pages = kzalloc(sizeof(*pages), GFP_KERNEL_ACCOUNT);
1376 	if (!pages)
1377 		return ERR_PTR(-ENOMEM);
1378 
1379 	kref_init(&pages->kref);
1380 	xa_init_flags(&pages->pinned_pfns, XA_FLAGS_ACCOUNT);
1381 	mutex_init(&pages->mutex);
1382 	pages->source_mm = current->mm;
1383 	mmgrab(pages->source_mm);
1384 	pages->npages = DIV_ROUND_UP(length + start_byte, PAGE_SIZE);
1385 	pages->access_itree = RB_ROOT_CACHED;
1386 	pages->domains_itree = RB_ROOT_CACHED;
1387 	pages->writable = writable;
1388 	if (capable(CAP_IPC_LOCK))
1389 		pages->account_mode = IOPT_PAGES_ACCOUNT_NONE;
1390 	else
1391 		pages->account_mode = IOPT_PAGES_ACCOUNT_USER;
1392 	pages->source_task = current->group_leader;
1393 	get_task_struct(current->group_leader);
1394 	pages->source_user = get_uid(current_user());
1395 	return pages;
1396 }
1397 
1398 struct iopt_pages *iopt_alloc_user_pages(void __user *uptr,
1399 					 unsigned long length, bool writable)
1400 {
1401 	struct iopt_pages *pages;
1402 	unsigned long end;
1403 	void __user *uptr_down =
1404 		(void __user *)ALIGN_DOWN((uintptr_t)uptr, PAGE_SIZE);
1405 
1406 	if (check_add_overflow((unsigned long)uptr, length, &end))
1407 		return ERR_PTR(-EOVERFLOW);
1408 
1409 	pages = iopt_alloc_pages(uptr - uptr_down, length, writable);
1410 	if (IS_ERR(pages))
1411 		return pages;
1412 	pages->uptr = uptr_down;
1413 	pages->type = IOPT_ADDRESS_USER;
1414 	return pages;
1415 }
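
/*
 * Worked example with 4k pages: uptr = 0x1234 and length = 0x3000 give
 * uptr_down = 0x1000 and start_byte = 0x234, so iopt_alloc_pages() computes
 * npages = DIV_ROUND_UP(0x3000 + 0x234, 0x1000) = 4, covering the partially
 * used first and last pages.
 */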
1416 
1417 struct iopt_pages *iopt_alloc_file_pages(struct file *file,
1418 					 unsigned long start_byte,
1419 					 unsigned long start,
1420 					 unsigned long length, bool writable)
1421 
1422 {
1423 	struct iopt_pages *pages;
1424 
1425 	pages = iopt_alloc_pages(start_byte, length, writable);
1426 	if (IS_ERR(pages))
1427 		return pages;
1428 	pages->file = get_file(file);
1429 	pages->start = start - start_byte;
1430 	pages->type = IOPT_ADDRESS_FILE;
1431 	return pages;
1432 }
1433 
1434 static void iopt_revoke_notify(struct dma_buf_attachment *attach)
1435 {
1436 	struct iopt_pages *pages = attach->importer_priv;
1437 	struct iopt_pages_dmabuf_track *track;
1438 
1439 	guard(mutex)(&pages->mutex);
1440 	if (iopt_dmabuf_revoked(pages))
1441 		return;
1442 
1443 	list_for_each_entry(track, &pages->dmabuf.tracker, elm) {
1444 		struct iopt_area *area = track->area;
1445 
1446 		iopt_area_unmap_domain_range(area, track->domain,
1447 					     iopt_area_index(area),
1448 					     iopt_area_last_index(area));
1449 	}
1450 	pages->dmabuf.phys.len = 0;
1451 }
1452 
1453 static struct dma_buf_attach_ops iopt_dmabuf_attach_revoke_ops = {
1454 	.allow_peer2peer = true,
1455 	.move_notify = iopt_revoke_notify,
1456 };
1457 
1458 /*
1459  * iommufd and vfio have a circular dependency. Future work for a phys
1460  * based private interconnect will remove this.
1461  */
1462 static int
1463 sym_vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
1464 				 struct dma_buf_phys_vec *phys)
1465 {
1466 	typeof(&vfio_pci_dma_buf_iommufd_map) fn;
1467 	int rc;
1468 
1469 	rc = iommufd_test_dma_buf_iommufd_map(attachment, phys);
1470 	if (rc != -EOPNOTSUPP)
1471 		return rc;
1472 
1473 	if (!IS_ENABLED(CONFIG_VFIO_PCI_DMABUF))
1474 		return -EOPNOTSUPP;
1475 
1476 	fn = symbol_get(vfio_pci_dma_buf_iommufd_map);
1477 	if (!fn)
1478 		return -EOPNOTSUPP;
1479 	rc = fn(attachment, phys);
1480 	symbol_put(vfio_pci_dma_buf_iommufd_map);
1481 	return rc;
1482 }
1483 
1484 static int iopt_map_dmabuf(struct iommufd_ctx *ictx, struct iopt_pages *pages,
1485 			   struct dma_buf *dmabuf)
1486 {
1487 	struct dma_buf_attachment *attach;
1488 	int rc;
1489 
1490 	attach = dma_buf_dynamic_attach(dmabuf, iommufd_global_device(),
1491 					&iopt_dmabuf_attach_revoke_ops, pages);
1492 	if (IS_ERR(attach))
1493 		return PTR_ERR(attach);
1494 
1495 	dma_resv_lock(dmabuf->resv, NULL);
1496 	/*
1497 	 * Lock ordering requires the mutex to be taken inside the reservation;
1498 	 * make sure lockdep sees this.
1499 	 */
1500 	if (IS_ENABLED(CONFIG_LOCKDEP)) {
1501 		mutex_lock(&pages->mutex);
1502 		mutex_unlock(&pages->mutex);
1503 	}
1504 
1505 	rc = sym_vfio_pci_dma_buf_iommufd_map(attach, &pages->dmabuf.phys);
1506 	if (rc)
1507 		goto err_detach;
1508 
1509 	dma_resv_unlock(dmabuf->resv);
1510 
1511 	/* On success iopt_release_pages() will detach and put the dmabuf. */
1512 	pages->dmabuf.attach = attach;
1513 	return 0;
1514 
1515 err_detach:
1516 	dma_resv_unlock(dmabuf->resv);
1517 	dma_buf_detach(dmabuf, attach);
1518 	return rc;
1519 }
1520 
1521 struct iopt_pages *iopt_alloc_dmabuf_pages(struct iommufd_ctx *ictx,
1522 					   struct dma_buf *dmabuf,
1523 					   unsigned long start_byte,
1524 					   unsigned long start,
1525 					   unsigned long length, bool writable)
1526 {
1527 	static struct lock_class_key pages_dmabuf_mutex_key;
1528 	struct iopt_pages *pages;
1529 	int rc;
1530 
1531 	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
1532 		return ERR_PTR(-EOPNOTSUPP);
1533 
1534 	if (dmabuf->size <= (start + length - 1) ||
1535 	    length / PAGE_SIZE >= MAX_NPFNS)
1536 		return ERR_PTR(-EINVAL);
1537 
1538 	pages = iopt_alloc_pages(start_byte, length, writable);
1539 	if (IS_ERR(pages))
1540 		return pages;
1541 
1542 	/*
1543 	 * The mmap_lock can be held when obtaining the dmabuf reservation lock
1544 	 * which creates a locking cycle with the pages mutex, which is held
1545 	 * while obtaining the mmap_lock. This locking path is not present for
1546 	 * IOPT_ADDRESS_DMABUF so split the lock class.
1547 	 */
1548 	lockdep_set_class(&pages->mutex, &pages_dmabuf_mutex_key);
1549 
1550 	/* dmabuf does not use pinned page accounting. */
1551 	pages->account_mode = IOPT_PAGES_ACCOUNT_NONE;
1552 	pages->type = IOPT_ADDRESS_DMABUF;
1553 	pages->dmabuf.start = start - start_byte;
1554 	INIT_LIST_HEAD(&pages->dmabuf.tracker);
1555 
1556 	rc = iopt_map_dmabuf(ictx, pages, dmabuf);
1557 	if (rc) {
1558 		iopt_put_pages(pages);
1559 		return ERR_PTR(rc);
1560 	}
1561 
1562 	return pages;
1563 }
1564 
1565 int iopt_dmabuf_track_domain(struct iopt_pages *pages, struct iopt_area *area,
1566 			     struct iommu_domain *domain)
1567 {
1568 	struct iopt_pages_dmabuf_track *track;
1569 
1570 	lockdep_assert_held(&pages->mutex);
1571 	if (WARN_ON(!iopt_is_dmabuf(pages)))
1572 		return -EINVAL;
1573 
1574 	list_for_each_entry(track, &pages->dmabuf.tracker, elm)
1575 		if (WARN_ON(track->domain == domain && track->area == area))
1576 			return -EINVAL;
1577 
1578 	track = kzalloc(sizeof(*track), GFP_KERNEL);
1579 	if (!track)
1580 		return -ENOMEM;
1581 	track->domain = domain;
1582 	track->area = area;
1583 	list_add_tail(&track->elm, &pages->dmabuf.tracker);
1584 
1585 	return 0;
1586 }
1587 
1588 void iopt_dmabuf_untrack_domain(struct iopt_pages *pages,
1589 				struct iopt_area *area,
1590 				struct iommu_domain *domain)
1591 {
1592 	struct iopt_pages_dmabuf_track *track;
1593 
1594 	lockdep_assert_held(&pages->mutex);
1595 	WARN_ON(!iopt_is_dmabuf(pages));
1596 
1597 	list_for_each_entry(track, &pages->dmabuf.tracker, elm) {
1598 		if (track->domain == domain && track->area == area) {
1599 			list_del(&track->elm);
1600 			kfree(track);
1601 			return;
1602 		}
1603 	}
1604 	WARN_ON(true);
1605 }
1606 
1607 int iopt_dmabuf_track_all_domains(struct iopt_area *area,
1608 				  struct iopt_pages *pages)
1609 {
1610 	struct iopt_pages_dmabuf_track *track;
1611 	struct iommu_domain *domain;
1612 	unsigned long index;
1613 	int rc;
1614 
1615 	list_for_each_entry(track, &pages->dmabuf.tracker, elm)
1616 		if (WARN_ON(track->area == area))
1617 			return -EINVAL;
1618 
1619 	xa_for_each(&area->iopt->domains, index, domain) {
1620 		rc = iopt_dmabuf_track_domain(pages, area, domain);
1621 		if (rc)
1622 			goto err_untrack;
1623 	}
1624 	return 0;
1625 err_untrack:
1626 	iopt_dmabuf_untrack_all_domains(area, pages);
1627 	return rc;
1628 }
1629 
1630 void iopt_dmabuf_untrack_all_domains(struct iopt_area *area,
1631 				     struct iopt_pages *pages)
1632 {
1633 	struct iopt_pages_dmabuf_track *track;
1634 	struct iopt_pages_dmabuf_track *tmp;
1635 
1636 	list_for_each_entry_safe(track, tmp, &pages->dmabuf.tracker,
1637 				 elm) {
1638 		if (track->area == area) {
1639 			list_del(&track->elm);
1640 			kfree(track);
1641 		}
1642 	}
1643 }
1644 
1645 void iopt_release_pages(struct kref *kref)
1646 {
1647 	struct iopt_pages *pages = container_of(kref, struct iopt_pages, kref);
1648 
1649 	WARN_ON(!RB_EMPTY_ROOT(&pages->access_itree.rb_root));
1650 	WARN_ON(!RB_EMPTY_ROOT(&pages->domains_itree.rb_root));
1651 	WARN_ON(pages->npinned);
1652 	WARN_ON(!xa_empty(&pages->pinned_pfns));
1653 	mmdrop(pages->source_mm);
1654 	mutex_destroy(&pages->mutex);
1655 	put_task_struct(pages->source_task);
1656 	free_uid(pages->source_user);
1657 	if (iopt_is_dmabuf(pages) && pages->dmabuf.attach) {
1658 		struct dma_buf *dmabuf = pages->dmabuf.attach->dmabuf;
1659 
1660 		dma_buf_detach(dmabuf, pages->dmabuf.attach);
1661 		dma_buf_put(dmabuf);
1662 		WARN_ON(!list_empty(&pages->dmabuf.tracker));
1663 	} else if (pages->type == IOPT_ADDRESS_FILE) {
1664 		fput(pages->file);
1665 	}
1666 	kfree(pages);
1667 }
1668 
1669 static void
1670 iopt_area_unpin_domain(struct pfn_batch *batch, struct iopt_area *area,
1671 		       struct iopt_pages *pages, struct iommu_domain *domain,
1672 		       unsigned long start_index, unsigned long last_index,
1673 		       unsigned long *unmapped_end_index,
1674 		       unsigned long real_last_index)
1675 {
1676 	while (start_index <= last_index) {
1677 		unsigned long batch_last_index;
1678 
1679 		if (*unmapped_end_index <= last_index) {
1680 			unsigned long start =
1681 				max(start_index, *unmapped_end_index);
1682 
1683 			if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
1684 			    batch->total_pfns)
1685 				WARN_ON(*unmapped_end_index -
1686 						batch->total_pfns !=
1687 					start_index);
1688 			batch_from_domain(batch, domain, area, start,
1689 					  last_index);
1690 			batch_last_index = start_index + batch->total_pfns - 1;
1691 		} else {
1692 			batch_last_index = last_index;
1693 		}
1694 
1695 		if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
1696 			WARN_ON(batch_last_index > real_last_index);
1697 
1698 		/*
1699 		 * unmaps must always 'cut' at a place where the pfns are not
1700 		 * contiguous to pair with the maps that always install
1701 		 * contiguous pages. Thus, if we have to stop unpinning in the
1702 		 * middle of the domains we need to keep reading pfns until we
1703 		 * find a cut point to do the unmap. The pfns we read are
1704 		 * carried over and either skipped or integrated into the next
1705 		 * batch.
1706 		 */
1707 		if (batch_last_index == last_index &&
1708 		    last_index != real_last_index)
1709 			batch_from_domain_continue(batch, domain, area,
1710 						   last_index + 1,
1711 						   real_last_index);
1712 
1713 		if (*unmapped_end_index <= batch_last_index) {
1714 			iopt_area_unmap_domain_range(
1715 				area, domain, *unmapped_end_index,
1716 				start_index + batch->total_pfns - 1);
1717 			*unmapped_end_index = start_index + batch->total_pfns;
1718 		}
1719 
1720 		/* unpin must follow unmap */
1721 		batch_unpin(batch, pages, 0,
1722 			    batch_last_index - start_index + 1);
1723 		start_index = batch_last_index + 1;
1724 
1725 		batch_clear_carry(batch,
1726 				  *unmapped_end_index - batch_last_index - 1);
1727 	}
1728 }
1729 
1730 static void __iopt_area_unfill_domain(struct iopt_area *area,
1731 				      struct iopt_pages *pages,
1732 				      struct iommu_domain *domain,
1733 				      unsigned long last_index)
1734 {
1735 	struct interval_tree_double_span_iter span;
1736 	unsigned long start_index = iopt_area_index(area);
1737 	unsigned long unmapped_end_index = start_index;
1738 	u64 backup[BATCH_BACKUP_SIZE];
1739 	struct pfn_batch batch;
1740 
1741 	lockdep_assert_held(&pages->mutex);
1742 
1743 	if (iopt_is_dmabuf(pages)) {
1744 		if (WARN_ON(iopt_dmabuf_revoked(pages)))
1745 			return;
1746 		iopt_area_unmap_domain_range(area, domain, start_index,
1747 					     last_index);
1748 		return;
1749 	}
1750 
1751 	/*
1752 	 * For security we must not unpin something that is still DMA mapped,
1753 	 * so this must unmap any IOVA before we go ahead and unpin the pages.
1754 	 * This creates a complexity where we need to skip over unpinning pages
1755 	 * held in the xarray, but continue to unmap from the domain.
1756 	 *
1757 	 * The domain unmap cannot stop in the middle of a contiguous range of
1758 	 * PFNs. To solve this problem the unpinning step will read ahead to the
1759 	 * end of any contiguous span, unmap that whole span, and then only
1760 	 * unpin the leading part that does not have any accesses. The residual
1761 	 * PFNs that were unmapped but not unpinned are called a "carry" in the
1762 	 * batch as they are moved to the front of the PFN list and continue on
1763 	 * to the next iteration(s).
1764 	 */
1765 	batch_init_backup(&batch, last_index + 1, backup, sizeof(backup));
1766 	interval_tree_for_each_double_span(&span, &pages->domains_itree,
1767 					   &pages->access_itree, start_index,
1768 					   last_index) {
1769 		if (span.is_used) {
1770 			batch_skip_carry(&batch,
1771 					 span.last_used - span.start_used + 1);
1772 			continue;
1773 		}
1774 		iopt_area_unpin_domain(&batch, area, pages, domain,
1775 				       span.start_hole, span.last_hole,
1776 				       &unmapped_end_index, last_index);
1777 	}
1778 	/*
1779 	 * If the range ends in an access then we do the residual unmap without
1780 	 * any unpins.
1781 	 */
1782 	if (unmapped_end_index != last_index + 1)
1783 		iopt_area_unmap_domain_range(area, domain, unmapped_end_index,
1784 					     last_index);
1785 	WARN_ON(batch.total_pfns);
1786 	batch_destroy(&batch, backup);
1787 	update_unpinned(pages);
1788 }
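
/*
 * Worked example of the carry logic above (hypothetical layout, added for
 * illustration): suppose the area covers indexes 0-9, an in-kernel access
 * holds indexes 4-5, and the domain mapped all ten pages as one physically
 * contiguous run. The double span iterator yields the hole [0,3], the used
 * span [4,5] and the hole [6,9]. For [0,3] the read-ahead continues through
 * index 9 because the PFNs never become discontiguous, so the whole of 0-9 is
 * unmapped in one cut, only 0-3 is unpinned, and 4-9 becomes the carry. The
 * used span then skips 4-5 out of the carry (the access still needs them
 * pinned), and the final hole unpins the carried 6-9 without another unmap.
 */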
1789 
1790 static void iopt_area_unfill_partial_domain(struct iopt_area *area,
1791 					    struct iopt_pages *pages,
1792 					    struct iommu_domain *domain,
1793 					    unsigned long end_index)
1794 {
1795 	if (end_index != iopt_area_index(area))
1796 		__iopt_area_unfill_domain(area, pages, domain, end_index - 1);
1797 }
1798 
1799 /**
1800  * iopt_area_unmap_domain() - Unmap without unpinning PFNs in a domain
1801  * @area: The IOVA range to unmap
1802  * @domain: The domain to unmap
1803  *
1804  * The caller must know that unpinning is not required, usually because there
1805  * are other domains in the iopt.
1806  */
1807 void iopt_area_unmap_domain(struct iopt_area *area, struct iommu_domain *domain)
1808 {
1809 	iommu_unmap_nofail(domain, iopt_area_iova(area),
1810 			   iopt_area_length(area));
1811 }
1812 
1813 /**
1814  * iopt_area_unfill_domain() - Unmap and unpin PFNs in a domain
1815  * @area: IOVA area to use
1816  * @pages: page supplier for the area (area->pages is NULL)
1817  * @domain: Domain to unmap from
1818  *
1819  * The domain should be removed from the domains_itree before calling. The
1820  * domain will always be unmapped, but the PFNs may not be unpinned if there are
1821  * still accesses.
1822  */
1823 void iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages,
1824 			     struct iommu_domain *domain)
1825 {
1826 	if (iopt_dmabuf_revoked(pages))
1827 		return;
1828 
1829 	__iopt_area_unfill_domain(area, pages, domain,
1830 				  iopt_area_last_index(area));
1831 }
1832 
1833 /**
1834  * iopt_area_fill_domain() - Map PFNs from the area into a domain
1835  * @area: IOVA area to use
1836  * @domain: Domain to load PFNs into
1837  *
1838  * Read the pfns from the area's underlying iopt_pages and map them into the
1839  * given domain. Called when attaching a new domain to an io_pagetable.
1840  */
1841 int iopt_area_fill_domain(struct iopt_area *area, struct iommu_domain *domain)
1842 {
1843 	unsigned long done_end_index;
1844 	struct pfn_reader pfns;
1845 	int rc;
1846 
1847 	lockdep_assert_held(&area->pages->mutex);
1848 
1849 	if (iopt_dmabuf_revoked(area->pages))
1850 		return 0;
1851 
1852 	rc = pfn_reader_first(&pfns, area->pages, iopt_area_index(area),
1853 			      iopt_area_last_index(area));
1854 	if (rc)
1855 		return rc;
1856 
1857 	while (!pfn_reader_done(&pfns)) {
1858 		done_end_index = pfns.batch_start_index;
1859 		rc = batch_to_domain(&pfns.batch, domain, area,
1860 				     pfns.batch_start_index);
1861 		if (rc)
1862 			goto out_unmap;
1863 		done_end_index = pfns.batch_end_index;
1864 
1865 		rc = pfn_reader_next(&pfns);
1866 		if (rc)
1867 			goto out_unmap;
1868 	}
1869 
1870 	rc = pfn_reader_update_pinned(&pfns);
1871 	if (rc)
1872 		goto out_unmap;
1873 	goto out_destroy;
1874 
1875 out_unmap:
1876 	pfn_reader_release_pins(&pfns);
1877 	iopt_area_unfill_partial_domain(area, area->pages, domain,
1878 					done_end_index);
1879 out_destroy:
1880 	pfn_reader_destroy(&pfns);
1881 	return rc;
1882 }
1883 
1884 /**
1885  * iopt_area_fill_domains() - Install PFNs into the area's domains
1886  * @area: The area to act on
1887  * @pages: The pages associated with the area (area->pages is NULL)
1888  *
1889  * Called during area creation. The area is freshly created and not inserted in
1890  * the domains_itree yet. PFNs are read and loaded into every domain held in the
1891  * area's io_pagetable and the area is installed in the domains_itree.
1892  *
1893  * On failure all domains are left unchanged.
1894  */
1895 int iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages)
1896 {
1897 	unsigned long done_first_end_index;
1898 	unsigned long done_all_end_index;
1899 	struct iommu_domain *domain;
1900 	unsigned long unmap_index;
1901 	struct pfn_reader pfns;
1902 	unsigned long index;
1903 	int rc;
1904 
1905 	lockdep_assert_held(&area->iopt->domains_rwsem);
1906 
1907 	if (xa_empty(&area->iopt->domains))
1908 		return 0;
1909 
1910 	mutex_lock(&pages->mutex);
1911 	if (iopt_is_dmabuf(pages)) {
1912 		rc = iopt_dmabuf_track_all_domains(area, pages);
1913 		if (rc)
1914 			goto out_unlock;
1915 	}
1916 
1917 	if (!iopt_dmabuf_revoked(pages)) {
1918 		rc = pfn_reader_first(&pfns, pages, iopt_area_index(area),
1919 				      iopt_area_last_index(area));
1920 		if (rc)
1921 			goto out_untrack;
1922 
1923 		while (!pfn_reader_done(&pfns)) {
1924 			done_first_end_index = pfns.batch_end_index;
1925 			done_all_end_index = pfns.batch_start_index;
1926 			xa_for_each(&area->iopt->domains, index, domain) {
1927 				rc = batch_to_domain(&pfns.batch, domain, area,
1928 						     pfns.batch_start_index);
1929 				if (rc)
1930 					goto out_unmap;
1931 			}
1932 			done_all_end_index = done_first_end_index;
1933 
1934 			rc = pfn_reader_next(&pfns);
1935 			if (rc)
1936 				goto out_unmap;
1937 		}
1938 		rc = pfn_reader_update_pinned(&pfns);
1939 		if (rc)
1940 			goto out_unmap;
1941 
1942 		pfn_reader_destroy(&pfns);
1943 	}
1944 
1945 	area->storage_domain = xa_load(&area->iopt->domains, 0);
1946 	interval_tree_insert(&area->pages_node, &pages->domains_itree);
1947 	mutex_unlock(&pages->mutex);
1948 	return 0;
1949 
1950 out_unmap:
1951 	pfn_reader_release_pins(&pfns);
1952 	xa_for_each(&area->iopt->domains, unmap_index, domain) {
1953 		unsigned long end_index;
1954 
1955 		if (unmap_index < index)
1956 			end_index = done_first_end_index;
1957 		else
1958 			end_index = done_all_end_index;
1959 
1960 		/*
1961 		 * The area is not yet part of the domains_itree so we have to
1962 		 * manage the unpinning specially. The last domain does the
1963 		 * unpin, every other domain is just unmapped.
1964 		 */
1965 		if (unmap_index != area->iopt->next_domain_id - 1) {
1966 			if (end_index != iopt_area_index(area))
1967 				iopt_area_unmap_domain_range(
1968 					area, domain, iopt_area_index(area),
1969 					end_index - 1);
1970 		} else {
1971 			iopt_area_unfill_partial_domain(area, pages, domain,
1972 							end_index);
1973 		}
1974 	}
1975 	pfn_reader_destroy(&pfns);
1976 out_untrack:
1977 	if (iopt_is_dmabuf(pages))
1978 		iopt_dmabuf_untrack_all_domains(area, pages);
1979 out_unlock:
1980 	mutex_unlock(&pages->mutex);
1981 	return rc;
1982 }
1983 
1984 /**
1985  * iopt_area_unfill_domains() - unmap PFNs from the area's domains
1986  * @area: The area to act on
1987  * @pages: The pages associated with the area (area->pages is NULL)
1988  *
1989  * Called during area destruction. This unmaps the IOVAs covered by all the
1990  * area's domains and releases the PFNs.
1991  */
1992 void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages)
1993 {
1994 	struct io_pagetable *iopt = area->iopt;
1995 	struct iommu_domain *domain;
1996 	unsigned long index;
1997 
1998 	lockdep_assert_held(&iopt->domains_rwsem);
1999 
2000 	mutex_lock(&pages->mutex);
2001 	if (!area->storage_domain)
2002 		goto out_unlock;
2003 
2004 	xa_for_each(&iopt->domains, index, domain) {
2005 		if (domain == area->storage_domain)
2006 			continue;
2007 
2008 		if (!iopt_dmabuf_revoked(pages))
2009 			iopt_area_unmap_domain_range(
2010 				area, domain, iopt_area_index(area),
2011 				iopt_area_last_index(area));
2012 	}
2013 
2014 	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
2015 		WARN_ON(RB_EMPTY_NODE(&area->pages_node.rb));
2016 	interval_tree_remove(&area->pages_node, &pages->domains_itree);
2017 	iopt_area_unfill_domain(area, pages, area->storage_domain);
2018 	if (iopt_is_dmabuf(pages))
2019 		iopt_dmabuf_untrack_all_domains(area, pages);
2020 	area->storage_domain = NULL;
2021 out_unlock:
2022 	mutex_unlock(&pages->mutex);
2023 }
2024 
2025 static void iopt_pages_unpin_xarray(struct pfn_batch *batch,
2026 				    struct iopt_pages *pages,
2027 				    unsigned long start_index,
2028 				    unsigned long last_index)
2029 {
2030 	while (start_index <= last_index) {
2031 		batch_from_xarray_clear(batch, &pages->pinned_pfns, start_index,
2032 					last_index);
2033 		batch_unpin(batch, pages, 0, batch->total_pfns);
2034 		start_index += batch->total_pfns;
2035 		batch_clear(batch);
2036 	}
2037 }
2038 
2039 /**
2040  * iopt_pages_unfill_xarray() - Update the xarray after removing an access
2041  * @pages: The pages to act on
2042  * @start_index: Starting PFN index
2043  * @last_index: Last PFN index
2044  *
2045  * Called when an iopt_pages_access is removed, removes pages from the xarray.
2046  * The access should already be removed from the access_itree.
2047  */
2048 void iopt_pages_unfill_xarray(struct iopt_pages *pages,
2049 			      unsigned long start_index,
2050 			      unsigned long last_index)
2051 {
2052 	struct interval_tree_double_span_iter span;
2053 	u64 backup[BATCH_BACKUP_SIZE];
2054 	struct pfn_batch batch;
2055 	bool batch_inited = false;
2056 
2057 	lockdep_assert_held(&pages->mutex);
2058 
2059 	interval_tree_for_each_double_span(&span, &pages->access_itree,
2060 					   &pages->domains_itree, start_index,
2061 					   last_index) {
2062 		if (!span.is_used) {
2063 			if (!batch_inited) {
2064 				batch_init_backup(&batch,
2065 						  last_index - start_index + 1,
2066 						  backup, sizeof(backup));
2067 				batch_inited = true;
2068 			}
2069 			iopt_pages_unpin_xarray(&batch, pages, span.start_hole,
2070 						span.last_hole);
2071 		} else if (span.is_used == 2) {
2072 			/* Covered by a domain */
2073 			clear_xarray(&pages->pinned_pfns, span.start_used,
2074 				     span.last_used);
2075 		}
2076 		/* Otherwise covered by an existing access */
2077 	}
2078 	if (batch_inited)
2079 		batch_destroy(&batch, backup);
2080 	update_unpinned(pages);
2081 }
2082 
2083 /**
2084  * iopt_pages_fill_from_xarray() - Fast path for reading PFNs
2085  * @pages: The pages to act on
2086  * @start_index: The first page index in the range
2087  * @last_index: The last page index in the range
2088  * @out_pages: The output array to return the pages
2089  *
2090  * This can be called if the caller is holding a refcount on an
2091  * iopt_pages_access that is known to have already been filled. It quickly reads
2092  * the pages directly from the xarray.
2093  *
2094  * This is part of the SW iommu interface to read pages for in-kernel use.
2095  */
2096 void iopt_pages_fill_from_xarray(struct iopt_pages *pages,
2097 				 unsigned long start_index,
2098 				 unsigned long last_index,
2099 				 struct page **out_pages)
2100 {
2101 	XA_STATE(xas, &pages->pinned_pfns, start_index);
2102 	void *entry;
2103 
2104 	rcu_read_lock();
2105 	while (start_index <= last_index) {
2106 		entry = xas_next(&xas);
2107 		if (xas_retry(&xas, entry))
2108 			continue;
2109 		WARN_ON(!xa_is_value(entry));
2110 		*(out_pages++) = pfn_to_page(xa_to_value(entry));
2111 		start_index++;
2112 	}
2113 	rcu_read_unlock();
2114 }
2115 
2116 static int iopt_pages_fill_from_domain(struct iopt_pages *pages,
2117 				       unsigned long start_index,
2118 				       unsigned long last_index,
2119 				       struct page **out_pages)
2120 {
2121 	while (start_index != last_index + 1) {
2122 		unsigned long domain_last;
2123 		struct iopt_area *area;
2124 
2125 		area = iopt_pages_find_domain_area(pages, start_index);
2126 		if (WARN_ON(!area))
2127 			return -EINVAL;
2128 
2129 		domain_last = min(iopt_area_last_index(area), last_index);
2130 		out_pages = raw_pages_from_domain(area->storage_domain, area,
2131 						  start_index, domain_last,
2132 						  out_pages);
2133 		start_index = domain_last + 1;
2134 	}
2135 	return 0;
2136 }
2137 
2138 static int iopt_pages_fill(struct iopt_pages *pages,
2139 			   struct pfn_reader_user *user,
2140 			   unsigned long start_index,
2141 			   unsigned long last_index,
2142 			   struct page **out_pages)
2143 {
2144 	unsigned long cur_index = start_index;
2145 	int rc;
2146 
2147 	while (cur_index != last_index + 1) {
2148 		user->upages = out_pages + (cur_index - start_index);
2149 		rc = pfn_reader_user_pin(user, pages, cur_index, last_index);
2150 		if (rc)
2151 			goto out_unpin;
2152 		cur_index = user->upages_end;
2153 	}
2154 	return 0;
2155 
2156 out_unpin:
2157 	if (start_index != cur_index)
2158 		iopt_pages_err_unpin(pages, start_index, cur_index - 1,
2159 				     out_pages);
2160 	return rc;
2161 }
2162 
2163 /**
2164  * iopt_pages_fill_xarray() - Read PFNs
2165  * @pages: The pages to act on
2166  * @start_index: The first page index in the range
2167  * @last_index: The last page index in the range
2168  * @out_pages: The output array to return the pages, may be NULL
2169  *
2170  * This populates the xarray and returns the pages in out_pages. As the slow
2171  * path this is able to copy pages from other storage tiers into the xarray.
2172  *
2173  * On failure the xarray is left unchanged.
2174  *
2175  * This is part of the SW iommu interface to read pages for in-kernel use.
2176  */
2177 int iopt_pages_fill_xarray(struct iopt_pages *pages, unsigned long start_index,
2178 			   unsigned long last_index, struct page **out_pages)
2179 {
2180 	struct interval_tree_double_span_iter span;
2181 	unsigned long xa_end = start_index;
2182 	struct pfn_reader_user user;
2183 	int rc;
2184 
2185 	lockdep_assert_held(&pages->mutex);
2186 
2187 	pfn_reader_user_init(&user, pages);
2188 	user.upages_len = (last_index - start_index + 1) * sizeof(*out_pages);
2189 	interval_tree_for_each_double_span(&span, &pages->access_itree,
2190 					   &pages->domains_itree, start_index,
2191 					   last_index) {
2192 		struct page **cur_pages;
2193 
2194 		if (span.is_used == 1) {
2195 			cur_pages = out_pages + (span.start_used - start_index);
2196 			iopt_pages_fill_from_xarray(pages, span.start_used,
2197 						    span.last_used, cur_pages);
2198 			continue;
2199 		}
2200 
2201 		if (span.is_used == 2) {
2202 			cur_pages = out_pages + (span.start_used - start_index);
2203 			iopt_pages_fill_from_domain(pages, span.start_used,
2204 						    span.last_used, cur_pages);
2205 			rc = pages_to_xarray(&pages->pinned_pfns,
2206 					     span.start_used, span.last_used,
2207 					     cur_pages);
2208 			if (rc)
2209 				goto out_clean_xa;
2210 			xa_end = span.last_used + 1;
2211 			continue;
2212 		}
2213 
2214 		/* hole */
2215 		cur_pages = out_pages + (span.start_hole - start_index);
2216 		rc = iopt_pages_fill(pages, &user, span.start_hole,
2217 				     span.last_hole, cur_pages);
2218 		if (rc)
2219 			goto out_clean_xa;
2220 		rc = pages_to_xarray(&pages->pinned_pfns, span.start_hole,
2221 				     span.last_hole, cur_pages);
2222 		if (rc) {
2223 			iopt_pages_err_unpin(pages, span.start_hole,
2224 					     span.last_hole, cur_pages);
2225 			goto out_clean_xa;
2226 		}
2227 		xa_end = span.last_hole + 1;
2228 	}
2229 	rc = pfn_reader_user_update_pinned(&user, pages);
2230 	if (rc)
2231 		goto out_clean_xa;
2232 	user.upages = NULL;
2233 	pfn_reader_user_destroy(&user, pages);
2234 	return 0;
2235 
2236 out_clean_xa:
2237 	if (start_index != xa_end)
2238 		iopt_pages_unfill_xarray(pages, start_index, xa_end - 1);
2239 	user.upages = NULL;
2240 	pfn_reader_user_destroy(&user, pages);
2241 	return rc;
2242 }
2243 
2244 /*
2245  * This uses the pfn_reader instead of taking a shortcut by using the mm. It can
2246  * do every scenario and is fully consistent with what an iommu_domain would
2247  * see.
2248  */
2249 static int iopt_pages_rw_slow(struct iopt_pages *pages,
2250 			      unsigned long start_index,
2251 			      unsigned long last_index, unsigned long offset,
2252 			      void *data, unsigned long length,
2253 			      unsigned int flags)
2254 {
2255 	struct pfn_reader pfns;
2256 	int rc;
2257 
2258 	mutex_lock(&pages->mutex);
2259 
2260 	rc = pfn_reader_first(&pfns, pages, start_index, last_index);
2261 	if (rc)
2262 		goto out_unlock;
2263 
2264 	while (!pfn_reader_done(&pfns)) {
2265 		unsigned long done;
2266 
2267 		done = batch_rw(&pfns.batch, data, offset, length, flags);
2268 		data += done;
2269 		length -= done;
2270 		offset = 0;
2271 		pfn_reader_unpin(&pfns);
2272 
2273 		rc = pfn_reader_next(&pfns);
2274 		if (rc)
2275 			goto out_destroy;
2276 	}
2277 	if (WARN_ON(length != 0))
2278 		rc = -EINVAL;
2279 out_destroy:
2280 	pfn_reader_destroy(&pfns);
2281 out_unlock:
2282 	mutex_unlock(&pages->mutex);
2283 	return rc;
2284 }
2285 
2286 /*
2287  * A medium speed path that still allows DMA inconsistencies, but doesn't do any
2288  * memory allocations or interval tree searches.
2289  */
2290 static int iopt_pages_rw_page(struct iopt_pages *pages, unsigned long index,
2291 			      unsigned long offset, void *data,
2292 			      unsigned long length, unsigned int flags)
2293 {
2294 	struct page *page = NULL;
2295 	int rc;
2296 
2297 	if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
2298 	    WARN_ON(pages->type != IOPT_ADDRESS_USER))
2299 		return -EINVAL;
2300 
2301 	if (!mmget_not_zero(pages->source_mm))
2302 		return iopt_pages_rw_slow(pages, index, index, offset, data,
2303 					  length, flags);
2304 
2305 	if (iommufd_should_fail()) {
2306 		rc = -EINVAL;
2307 		goto out_mmput;
2308 	}
2309 
2310 	mmap_read_lock(pages->source_mm);
2311 	rc = pin_user_pages_remote(
2312 		pages->source_mm, (uintptr_t)(pages->uptr + index * PAGE_SIZE),
2313 		1, (flags & IOMMUFD_ACCESS_RW_WRITE) ? FOLL_WRITE : 0, &page,
2314 		NULL);
2315 	mmap_read_unlock(pages->source_mm);
2316 	if (rc != 1) {
2317 		if (WARN_ON(rc >= 0))
2318 			rc = -EINVAL;
2319 		goto out_mmput;
2320 	}
2321 	copy_data_page(page, data, offset, length, flags);
2322 	unpin_user_page(page);
2323 	rc = 0;
2324 
2325 out_mmput:
2326 	mmput(pages->source_mm);
2327 	return rc;
2328 }
2329 
2330 /**
2331  * iopt_pages_rw_access() - Copy to/from a linear slice of the pages
2332  * @pages: pages to act on
2333  * @start_byte: First byte of pages to copy to/from
2334  * @data: Kernel buffer to get/put the data
2335  * @length: Number of bytes to copy
2336  * @flags: IOMMUFD_ACCESS_RW_* flags
2337  *
2338  * This will find each page in the range, kmap it and then memcpy to/from
2339  * the given kernel buffer.
2340  */
2341 int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte,
2342 			 void *data, unsigned long length, unsigned int flags)
2343 {
2344 	unsigned long start_index = start_byte / PAGE_SIZE;
2345 	unsigned long last_index = (start_byte + length - 1) / PAGE_SIZE;
2346 	bool change_mm = current->mm != pages->source_mm;
2347 	int rc = 0;
2348 
2349 	if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
2350 	    (flags & __IOMMUFD_ACCESS_RW_SLOW_PATH))
2351 		change_mm = true;
2352 
2353 	if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable)
2354 		return -EPERM;
2355 
2356 	if (iopt_is_dmabuf(pages))
2357 		return -EINVAL;
2358 
2359 	if (pages->type != IOPT_ADDRESS_USER)
2360 		return iopt_pages_rw_slow(pages, start_index, last_index,
2361 					  start_byte % PAGE_SIZE, data, length,
2362 					  flags);
2363 
2364 	if (!(flags & IOMMUFD_ACCESS_RW_KTHREAD) && change_mm) {
2365 		if (start_index == last_index)
2366 			return iopt_pages_rw_page(pages, start_index,
2367 						  start_byte % PAGE_SIZE, data,
2368 						  length, flags);
2369 		return iopt_pages_rw_slow(pages, start_index, last_index,
2370 					  start_byte % PAGE_SIZE, data, length,
2371 					  flags);
2372 	}
2373 
2374 	/*
2375 	 * Try to copy using copy_to_user(). We do this as a fast path and
2376 	 * ignore any pinning inconsistencies, unlike a real DMA path.
2377 	 */
2378 	if (change_mm) {
2379 		if (!mmget_not_zero(pages->source_mm))
2380 			return iopt_pages_rw_slow(pages, start_index,
2381 						  last_index,
2382 						  start_byte % PAGE_SIZE, data,
2383 						  length, flags);
2384 		kthread_use_mm(pages->source_mm);
2385 	}
2386 
2387 	if (flags & IOMMUFD_ACCESS_RW_WRITE) {
2388 		if (copy_to_user(pages->uptr + start_byte, data, length))
2389 			rc = -EFAULT;
2390 	} else {
2391 		if (copy_from_user(data, pages->uptr + start_byte, length))
2392 			rc = -EFAULT;
2393 	}
2394 
2395 	if (change_mm) {
2396 		kthread_unuse_mm(pages->source_mm);
2397 		mmput(pages->source_mm);
2398 	}
2399 
2400 	return rc;
2401 }
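
/*
 * Usage sketch (added for illustration, not driver code): how an in-kernel
 * consumer is expected to reach iopt_pages_rw_access(). This assumes the
 * exported iommufd_access_rw() wrapper, which looks up the areas covering the
 * IOVA range and forwards the area-relative byte offset here; the function
 * and variable names below are hypothetical.
 */
static int my_driver_read_guest(struct iommufd_access *my_access,
				unsigned long iova, void *buf, size_t len)
{
	/* No IOMMUFD_ACCESS_RW_WRITE flag: copy from the IOVA range into buf */
	return iommufd_access_rw(my_access, iova, buf, len, 0);
}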
2402 
2403 static struct iopt_pages_access *
2404 iopt_pages_get_exact_access(struct iopt_pages *pages, unsigned long index,
2405 			    unsigned long last)
2406 {
2407 	struct interval_tree_node *node;
2408 
2409 	lockdep_assert_held(&pages->mutex);
2410 
2411 	/* There can be overlapping ranges in this interval tree */
2412 	for (node = interval_tree_iter_first(&pages->access_itree, index, last);
2413 	     node; node = interval_tree_iter_next(node, index, last))
2414 		if (node->start == index && node->last == last)
2415 			return container_of(node, struct iopt_pages_access,
2416 					    node);
2417 	return NULL;
2418 }
2419 
2420 /**
2421  * iopt_area_add_access() - Record an in-kernel access for PFNs
2422  * @area: The source of PFNs
2423  * @start_index: First page index
2424  * @last_index: Inclusive last page index
2425  * @out_pages: Output list of struct page's representing the PFNs
2426  * @flags: IOMMUFD_ACCESS_RW_* flags
2427  * @lock_area: Fail userspace munmap on this area
2428  *
2429  * Record that an in-kernel access will be accessing the pages, ensure they are
2430  * pinned, and return the PFNs as a simple list of 'struct page *'.
2431  *
2432  * This should be undone through a matching call to iopt_area_remove_access()
2433  */
2434 int iopt_area_add_access(struct iopt_area *area, unsigned long start_index,
2435 			 unsigned long last_index, struct page **out_pages,
2436 			 unsigned int flags, bool lock_area)
2437 {
2438 	struct iopt_pages *pages = area->pages;
2439 	struct iopt_pages_access *access;
2440 	int rc;
2441 
2442 	if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable)
2443 		return -EPERM;
2444 
2445 	mutex_lock(&pages->mutex);
2446 	access = iopt_pages_get_exact_access(pages, start_index, last_index);
2447 	if (access) {
2448 		area->num_accesses++;
2449 		if (lock_area)
2450 			area->num_locks++;
2451 		access->users++;
2452 		iopt_pages_fill_from_xarray(pages, start_index, last_index,
2453 					    out_pages);
2454 		mutex_unlock(&pages->mutex);
2455 		return 0;
2456 	}
2457 
2458 	access = kzalloc(sizeof(*access), GFP_KERNEL_ACCOUNT);
2459 	if (!access) {
2460 		rc = -ENOMEM;
2461 		goto err_unlock;
2462 	}
2463 
2464 	rc = iopt_pages_fill_xarray(pages, start_index, last_index, out_pages);
2465 	if (rc)
2466 		goto err_free;
2467 
2468 	access->node.start = start_index;
2469 	access->node.last = last_index;
2470 	access->users = 1;
2471 	area->num_accesses++;
2472 	if (lock_area)
2473 		area->num_locks++;
2474 	interval_tree_insert(&access->node, &pages->access_itree);
2475 	mutex_unlock(&pages->mutex);
2476 	return 0;
2477 
2478 err_free:
2479 	kfree(access);
2480 err_unlock:
2481 	mutex_unlock(&pages->mutex);
2482 	return rc;
2483 }
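
/*
 * Usage sketch (added for illustration, not driver code): the pin/use/unpin
 * pairing as seen by an in-kernel access. This assumes the exported
 * iommufd_access_pin_pages()/iommufd_access_unpin_pages() wrappers, which end
 * up calling iopt_area_add_access()/iopt_area_remove_access() for each area
 * covering the IOVA range; the driver function below is hypothetical.
 */
static int my_driver_dma_to_range(struct iommufd_access *access,
				  unsigned long iova, unsigned long length)
{
	unsigned long last = iova + length - 1;
	unsigned long npages = (last / PAGE_SIZE) - (iova / PAGE_SIZE) + 1;
	struct page **pages;
	int rc;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* Pins the pages and keeps them pinned until the matching unpin */
	rc = iommufd_access_pin_pages(access, iova, length, pages,
				      IOMMUFD_ACCESS_RW_WRITE);
	if (rc)
		goto out_free;

	/* ... program the device with 'pages' and wait for the DMA ... */

	iommufd_access_unpin_pages(access, iova, length);
out_free:
	kfree(pages);
	return rc;
}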
2484 
2485 /**
2486  * iopt_area_remove_access() - Release an in-kernel access for PFNs
2487  * @area: The source of PFNs
2488  * @start_index: First page index
2489  * @last_index: Inclusive last page index
2490  * @unlock_area: Must match the matching iopt_area_add_access()'s lock_area
2491  *
2492  * Undo iopt_area_add_access() and unpin the pages if necessary. The caller
2493  * must stop using the PFNs before calling this.
2494  */
2495 void iopt_area_remove_access(struct iopt_area *area, unsigned long start_index,
2496 			     unsigned long last_index, bool unlock_area)
2497 {
2498 	struct iopt_pages *pages = area->pages;
2499 	struct iopt_pages_access *access;
2500 
2501 	mutex_lock(&pages->mutex);
2502 	access = iopt_pages_get_exact_access(pages, start_index, last_index);
2503 	if (WARN_ON(!access))
2504 		goto out_unlock;
2505 
2506 	WARN_ON(area->num_accesses == 0 || access->users == 0);
2507 	if (unlock_area) {
2508 		WARN_ON(area->num_locks == 0);
2509 		area->num_locks--;
2510 	}
2511 	area->num_accesses--;
2512 	access->users--;
2513 	if (access->users)
2514 		goto out_unlock;
2515 
2516 	interval_tree_remove(&access->node, &pages->access_itree);
2517 	iopt_pages_unfill_xarray(pages, start_index, last_index);
2518 	kfree(access);
2519 out_unlock:
2520 	mutex_unlock(&pages->mutex);
2521 }
2522