// SPDX-License-Identifier: GPL-2.0-only
/*
 * MMU-based software IOTLB.
 *
 * Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
 *
 * Author: Xie Yongji <xieyongji@bytedance.com>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/vdpa.h>

#include "iova_domain.h"

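/*
 * Record a mapping in the domain's IOTLB. The backing file is pinned with
 * get_file() and stored, together with its offset, as the entry's opaque
 * context; the reference is dropped when the range is removed.
 */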
static int vduse_iotlb_add_range(struct vduse_iova_domain *domain,
				 u64 start, u64 last,
				 u64 addr, unsigned int perm,
				 struct file *file, u64 offset)
{
	struct vdpa_map_file *map_file;
	int ret;

	map_file = kmalloc(sizeof(*map_file), GFP_ATOMIC);
	if (!map_file)
		return -ENOMEM;

	map_file->file = get_file(file);
	map_file->offset = offset;

	ret = vhost_iotlb_add_range_ctx(domain->iotlb, start, last,
					addr, perm, map_file);
	if (ret) {
		fput(map_file->file);
		kfree(map_file);
		return ret;
	}
	return 0;
}

static void vduse_iotlb_del_range(struct vduse_iova_domain *domain,
				  u64 start, u64 last)
{
	struct vdpa_map_file *map_file;
	struct vhost_iotlb_map *map;

	while ((map = vhost_iotlb_itree_first(domain->iotlb, start, last))) {
		map_file = (struct vdpa_map_file *)map->opaque;
		fput(map_file->file);
		kfree(map_file);
		vhost_iotlb_map_free(domain->iotlb, map);
	}
}

int vduse_domain_set_map(struct vduse_iova_domain *domain,
			 struct vhost_iotlb *iotlb)
{
	struct vdpa_map_file *map_file;
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = ULLONG_MAX;
	int ret;

	spin_lock(&domain->iotlb_lock);
	vduse_iotlb_del_range(domain, start, last);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		map_file = (struct vdpa_map_file *)map->opaque;
		ret = vduse_iotlb_add_range(domain, map->start, map->last,
					    map->addr, map->perm,
					    map_file->file,
					    map_file->offset);
		if (ret)
			goto err;
	}
	spin_unlock(&domain->iotlb_lock);

	return 0;
err:
	vduse_iotlb_del_range(domain, start, last);
	spin_unlock(&domain->iotlb_lock);
	return ret;
}

void vduse_domain_clear_map(struct vduse_iova_domain *domain,
			    struct vhost_iotlb *iotlb)
{
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = ULLONG_MAX;

	spin_lock(&domain->iotlb_lock);
	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		vduse_iotlb_del_range(domain, map->start, map->last);
	}
	spin_unlock(&domain->iotlb_lock);
}

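/*
 * Associate a range of bounce-map entries (BOUNCE_MAP_SIZE granules) with the
 * original physical address of an I/O buffer, allocating the backing bounce
 * pages on demand.
 */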
static int vduse_domain_map_bounce_page(struct vduse_iova_domain *domain,
					 u64 iova, u64 size, u64 paddr)
{
	struct vduse_bounce_map *map, *head_map;
	struct page *tmp_page;
	u64 last = iova + size - 1;

	while (iova <= last) {
		/*
		 * When PAGE_SIZE is larger than 4KB, multiple adjacent bounce_maps
		 * point into the same PAGE_SIZE memory page. Since bounce_maps
		 * originate from I/O requests, there is no guarantee that the
		 * orig_phys values of all requests falling into the same memory
		 * page are contiguous, so each bounce_map stores its own orig_phys.
		 *
		 * Bounce pages are allocated on demand, so several mappings that
		 * share the same memory page may try to allocate it simultaneously;
		 * cmpxchg is used to resolve this race so that only one allocation
		 * is kept.
		 */
		map = &domain->bounce_maps[iova >> BOUNCE_MAP_SHIFT];
		if (!map->bounce_page) {
			head_map = &domain->bounce_maps[(iova & PAGE_MASK) >> BOUNCE_MAP_SHIFT];
			if (!head_map->bounce_page) {
				tmp_page = alloc_page(GFP_ATOMIC);
				if (!tmp_page)
					return -ENOMEM;
				if (cmpxchg(&head_map->bounce_page, NULL, tmp_page))
					__free_page(tmp_page);
			}
			map->bounce_page = head_map->bounce_page;
		}
		map->orig_phys = paddr;
		paddr += BOUNCE_MAP_SIZE;
		iova += BOUNCE_MAP_SIZE;
	}
	return 0;
}

static void vduse_domain_unmap_bounce_page(struct vduse_iova_domain *domain,
					   u64 iova, u64 size)
{
	struct vduse_bounce_map *map;
	u64 last = iova + size - 1;

	while (iova <= last) {
		map = &domain->bounce_maps[iova >> BOUNCE_MAP_SHIFT];
		map->orig_phys = INVALID_PHYS_ADDR;
		iova += BOUNCE_MAP_SIZE;
	}
}

static unsigned int offset_in_bounce_page(dma_addr_t addr)
{
	return (addr & ~BOUNCE_MAP_MASK);
}

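/*
 * Copy between the original buffer (given by its physical address 'orig') and
 * the bounce buffer mapping at 'addr', walking the original buffer page by
 * page. DMA_TO_DEVICE copies from the original buffer into the bounce buffer;
 * other directions copy back.
 */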
static void do_bounce(phys_addr_t orig, void *addr, size_t size,
		      enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(orig);
	unsigned int offset = offset_in_page(orig);
	struct page *page;
	unsigned int sz = 0;

	while (size) {
		sz = min_t(size_t, PAGE_SIZE - offset, size);

		page = pfn_to_page(pfn);
		if (dir == DMA_TO_DEVICE)
			memcpy_from_page(addr, page, offset, sz);
		else
			memcpy_to_page(page, offset, addr, sz);

		size -= sz;
		pfn++;
		addr += sz;
		offset = 0;
	}
}

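/*
 * Bounce data between the original I/O buffers and the bounce pages for the
 * given IOVA range, working at BOUNCE_MAP_SIZE granularity and using the
 * userspace-registered pages when they are installed.
 */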
static void vduse_domain_bounce(struct vduse_iova_domain *domain,
				dma_addr_t iova, size_t size,
				enum dma_data_direction dir)
{
	struct vduse_bounce_map *map;
	struct page *page;
	unsigned int offset, head_offset;
	void *addr;
	size_t sz;

	if (iova >= domain->bounce_size)
		return;

	while (size) {
		map = &domain->bounce_maps[iova >> BOUNCE_MAP_SHIFT];
		head_offset = offset_in_page(iova);
		offset = offset_in_bounce_page(iova);
		sz = min_t(size_t, BOUNCE_MAP_SIZE - offset, size);

		if (WARN_ON(!map->bounce_page ||
			    map->orig_phys == INVALID_PHYS_ADDR))
			return;

		page = domain->user_bounce_pages ?
		       map->user_bounce_page : map->bounce_page;

		addr = kmap_local_page(page);
		do_bounce(map->orig_phys + offset, addr + head_offset, sz, dir);
		kunmap_local(addr);
		size -= sz;
		iova += sz;
	}
}

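/*
 * Look up (and take a reference on) the page backing @iova in the coherent
 * region by walking the IOTLB; used by the mmap fault handler.
 */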
static struct page *
vduse_domain_get_coherent_page(struct vduse_iova_domain *domain, u64 iova)
{
	u64 start = iova & PAGE_MASK;
	u64 last = start + PAGE_SIZE - 1;
	struct vhost_iotlb_map *map;
	struct page *page = NULL;

	spin_lock(&domain->iotlb_lock);
	map = vhost_iotlb_itree_first(domain->iotlb, start, last);
	if (!map)
		goto out;

	page = pfn_to_page((map->addr + iova - map->start) >> PAGE_SHIFT);
	get_page(page);
out:
	spin_unlock(&domain->iotlb_lock);

	return page;
}

static struct page *
vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova)
{
	struct vduse_bounce_map *map;
	struct page *page = NULL;

	read_lock(&domain->bounce_lock);
	map = &domain->bounce_maps[iova >> BOUNCE_MAP_SHIFT];
	if (domain->user_bounce_pages || !map->bounce_page)
		goto out;

	page = map->bounce_page;
	get_page(page);
out:
	read_unlock(&domain->bounce_lock);

	return page;
}

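/*
 * Free the kernel-allocated bounce pages. Only the entry mapping the head of
 * each PAGE_SIZE page owns the allocation, so the page is freed once and the
 * remaining aliases are simply cleared.
 */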
static void
vduse_domain_free_kernel_bounce_pages(struct vduse_iova_domain *domain)
{
	struct vduse_bounce_map *map;
	unsigned long pfn, bounce_pfns;

	bounce_pfns = domain->bounce_size >> BOUNCE_MAP_SHIFT;

	for (pfn = 0; pfn < bounce_pfns; pfn++) {
		map = &domain->bounce_maps[pfn];
		if (WARN_ON(map->orig_phys != INVALID_PHYS_ADDR))
			continue;

		if (!map->bounce_page)
			continue;

		if (!((pfn << BOUNCE_MAP_SHIFT) & ~PAGE_MASK))
			__free_page(map->bounce_page);
		map->bounce_page = NULL;
	}
}

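/*
 * Switch the domain to userspace-provided bounce pages. Each user page covers
 * one PAGE_SIZE page worth of bounce-map entries; the contents of kernel
 * bounce pages that are still in use are copied over before the switch.
 */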
int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
				       struct page **pages, int count)
{
	struct vduse_bounce_map *map, *head_map;
	int i, j, ret;
	int inner_pages = PAGE_SIZE / BOUNCE_MAP_SIZE;
	int bounce_pfns = domain->bounce_size >> BOUNCE_MAP_SHIFT;
	struct page *head_page = NULL;
	bool need_copy;

	/* Partial mapping is not supported for now */
	if (count != (domain->bounce_size >> PAGE_SHIFT))
		return -EINVAL;

	write_lock(&domain->bounce_lock);
	ret = -EEXIST;
	if (domain->user_bounce_pages)
		goto out;

	for (i = 0; i < count; i++) {
		need_copy = false;
		head_map = &domain->bounce_maps[(i * inner_pages)];
		head_page = head_map->bounce_page;
		for (j = 0; j < inner_pages; j++) {
			if ((i * inner_pages + j) >= bounce_pfns)
				break;
			map = &domain->bounce_maps[(i * inner_pages + j)];
			/* Copy kernel page to user page if it's in use */
			if ((head_page) && (map->orig_phys != INVALID_PHYS_ADDR))
				need_copy = true;
			map->user_bounce_page = pages[i];
		}
		get_page(pages[i]);
		if ((head_page) && (need_copy))
			memcpy_to_page(pages[i], 0,
				       page_address(head_page),
				       PAGE_SIZE);
	}
	domain->user_bounce_pages = true;
	ret = 0;
out:
	write_unlock(&domain->bounce_lock);

	return ret;
}

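/*
 * Tear down the userspace-provided bounce pages, copying any in-use contents
 * back into the kernel bounce pages and dropping the references taken when
 * the user pages were registered.
 */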
void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
{
	struct vduse_bounce_map *map, *head_map;
	unsigned long i, j, count;
	int inner_pages = PAGE_SIZE / BOUNCE_MAP_SIZE;
	int bounce_pfns = domain->bounce_size >> BOUNCE_MAP_SHIFT;
	struct page *head_page = NULL;
	bool need_copy;

	write_lock(&domain->bounce_lock);
	if (!domain->user_bounce_pages)
		goto out;

	count = domain->bounce_size >> PAGE_SHIFT;
	for (i = 0; i < count; i++) {
		need_copy = false;
		head_map = &domain->bounce_maps[(i * inner_pages)];
		if (WARN_ON(!head_map->user_bounce_page))
			continue;
		head_page = head_map->user_bounce_page;

		for (j = 0; j < inner_pages; j++) {
			if ((i * inner_pages + j) >= bounce_pfns)
				break;
			map = &domain->bounce_maps[(i * inner_pages + j)];
			if (WARN_ON(!map->user_bounce_page))
				continue;
			/* Copy user page to kernel page if it's in use */
			if ((map->orig_phys != INVALID_PHYS_ADDR) && (head_map->bounce_page))
				need_copy = true;
			map->user_bounce_page = NULL;
		}
		if (need_copy)
			memcpy_from_page(page_address(head_map->bounce_page),
					 head_page, 0, PAGE_SIZE);
		put_page(head_page);
	}
	domain->user_bounce_pages = false;
out:
	write_unlock(&domain->bounce_lock);
}

void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain)
{
	if (!domain->bounce_map)
		return;

	spin_lock(&domain->iotlb_lock);
	if (!domain->bounce_map)
		goto unlock;

	vduse_iotlb_del_range(domain, 0, domain->bounce_size - 1);
	domain->bounce_map = 0;
unlock:
	spin_unlock(&domain->iotlb_lock);
}

static int vduse_domain_init_bounce_map(struct vduse_iova_domain *domain)
{
	int ret = 0;

	if (domain->bounce_map)
		return 0;

	spin_lock(&domain->iotlb_lock);
	if (domain->bounce_map)
		goto unlock;

	ret = vduse_iotlb_add_range(domain, 0, domain->bounce_size - 1,
				    0, VHOST_MAP_RW, domain->file, 0);
	if (ret)
		goto unlock;

	domain->bounce_map = 1;
unlock:
	spin_unlock(&domain->iotlb_lock);
	return ret;
}

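/*
 * Allocate an IOVA range below @limit from the given iova_domain; returns 0
 * on failure, which callers treat as a mapping error.
 */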
static dma_addr_t
vduse_domain_alloc_iova(struct iova_domain *iovad,
			unsigned long size, unsigned long limit)
{
	unsigned long shift = iova_shift(iovad);
	unsigned long iova_len = iova_align(iovad, size) >> shift;
	unsigned long iova_pfn;

	iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true);

	return (dma_addr_t)iova_pfn << shift;
}

static void vduse_domain_free_iova(struct iova_domain *iovad,
				   dma_addr_t iova, size_t size)
{
	unsigned long shift = iova_shift(iovad);
	unsigned long iova_len = iova_align(iovad, size) >> shift;

	free_iova_fast(iovad, iova >> shift, iova_len);
}

void vduse_domain_sync_single_for_device(struct vduse_iova_domain *domain,
					 dma_addr_t dma_addr, size_t size,
					 enum dma_data_direction dir)
{
	read_lock(&domain->bounce_lock);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		vduse_domain_bounce(domain, dma_addr, size, DMA_TO_DEVICE);
	read_unlock(&domain->bounce_lock);
}

void vduse_domain_sync_single_for_cpu(struct vduse_iova_domain *domain,
				      dma_addr_t dma_addr, size_t size,
				      enum dma_data_direction dir)
{
	read_lock(&domain->bounce_lock);
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
	read_unlock(&domain->bounce_lock);
}

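/*
 * Map a page for streaming DMA: allocate an IOVA in the bounce region, record
 * the original physical address in the bounce maps and, unless
 * DMA_ATTR_SKIP_CPU_SYNC is set, bounce the data towards the device for
 * DMA_TO_DEVICE / DMA_BIDIRECTIONAL transfers.
 */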
dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
				 struct page *page, unsigned long offset,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	struct iova_domain *iovad = &domain->stream_iovad;
	unsigned long limit = domain->bounce_size - 1;
	phys_addr_t pa = page_to_phys(page) + offset;
	dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);

	if (!iova)
		return DMA_MAPPING_ERROR;

	if (vduse_domain_init_bounce_map(domain))
		goto err;

	read_lock(&domain->bounce_lock);
	if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
		goto err_unlock;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
		vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);

	read_unlock(&domain->bounce_lock);

	return iova;
err_unlock:
	read_unlock(&domain->bounce_lock);
err:
	vduse_domain_free_iova(iovad, iova, size);
	return DMA_MAPPING_ERROR;
}

void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
			     dma_addr_t dma_addr, size_t size,
			     enum dma_data_direction dir, unsigned long attrs)
{
	struct iova_domain *iovad = &domain->stream_iovad;

	read_lock(&domain->bounce_lock);
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
		vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);

	vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
	read_unlock(&domain->bounce_lock);
	vduse_domain_free_iova(iovad, dma_addr, size);
}

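/*
 * Allocate a coherent buffer. The memory comes from alloc_pages_exact() and
 * is published through an IOTLB entry that references the domain file at
 * offset @iova, so userspace can reach it by mmapping that file.
 */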
void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
				  size_t size, dma_addr_t *dma_addr,
				  gfp_t flag)
{
	struct iova_domain *iovad = &domain->consistent_iovad;
	unsigned long limit = domain->iova_limit;
	dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);
	void *orig = alloc_pages_exact(size, flag);

	if (!iova || !orig)
		goto err;

	spin_lock(&domain->iotlb_lock);
	if (vduse_iotlb_add_range(domain, (u64)iova, (u64)iova + size - 1,
				  virt_to_phys(orig), VHOST_MAP_RW,
				  domain->file, (u64)iova)) {
		spin_unlock(&domain->iotlb_lock);
		goto err;
	}
	spin_unlock(&domain->iotlb_lock);

	*dma_addr = iova;

	return orig;
err:
	*dma_addr = DMA_MAPPING_ERROR;
	if (orig)
		free_pages_exact(orig, size);
	if (iova)
		vduse_domain_free_iova(iovad, iova, size);

	return NULL;
}

void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
				void *vaddr, dma_addr_t dma_addr,
				unsigned long attrs)
{
	struct iova_domain *iovad = &domain->consistent_iovad;
	struct vhost_iotlb_map *map;
	struct vdpa_map_file *map_file;
	phys_addr_t pa;

	spin_lock(&domain->iotlb_lock);
	map = vhost_iotlb_itree_first(domain->iotlb, (u64)dma_addr,
				      (u64)dma_addr + size - 1);
	if (WARN_ON(!map)) {
		spin_unlock(&domain->iotlb_lock);
		return;
	}
	map_file = (struct vdpa_map_file *)map->opaque;
	fput(map_file->file);
	kfree(map_file);
	pa = map->addr;
	vhost_iotlb_map_free(domain->iotlb, map);
	spin_unlock(&domain->iotlb_lock);

	vduse_domain_free_iova(iovad, dma_addr, size);
	free_pages_exact(phys_to_virt(pa), size);
}

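/*
 * Fault handler for userspace mappings of the domain file: faults below
 * bounce_size are served from the bounce pages, anything above from the
 * coherent allocations tracked in the IOTLB.
 */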
static vm_fault_t vduse_domain_mmap_fault(struct vm_fault *vmf)
{
	struct vduse_iova_domain *domain = vmf->vma->vm_private_data;
	unsigned long iova = vmf->pgoff << PAGE_SHIFT;
	struct page *page;

	if (!domain)
		return VM_FAULT_SIGBUS;

	if (iova < domain->bounce_size)
		page = vduse_domain_get_bounce_page(domain, iova);
	else
		page = vduse_domain_get_coherent_page(domain, iova);

	if (!page)
		return VM_FAULT_SIGBUS;

	vmf->page = page;

	return 0;
}

static const struct vm_operations_struct vduse_domain_mmap_ops = {
	.fault = vduse_domain_mmap_fault,
};

static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vduse_iova_domain *domain = file->private_data;

	vm_flags_set(vma, VM_DONTDUMP | VM_DONTEXPAND);
	vma->vm_private_data = domain;
	vma->vm_ops = &vduse_domain_mmap_ops;

	return 0;
}

static int vduse_domain_release(struct inode *inode, struct file *file)
{
	struct vduse_iova_domain *domain = file->private_data;

	spin_lock(&domain->iotlb_lock);
	vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
	vduse_domain_remove_user_bounce_pages(domain);
	vduse_domain_free_kernel_bounce_pages(domain);
	spin_unlock(&domain->iotlb_lock);
	put_iova_domain(&domain->stream_iovad);
	put_iova_domain(&domain->consistent_iovad);
	vhost_iotlb_free(domain->iotlb);
	vfree(domain->bounce_maps);
	kfree(domain);

	return 0;
}

static const struct file_operations vduse_domain_fops = {
	.owner = THIS_MODULE,
	.mmap = vduse_domain_mmap,
	.release = vduse_domain_release,
};

void vduse_domain_destroy(struct vduse_iova_domain *domain)
{
	fput(domain->file);
}

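/*
 * Create an IOVA domain: a stream IOVA allocator backed by bounce pages for
 * the range [0, bounce_size) and a consistent IOVA allocator above it, both
 * exposed to userspace via an anonymous file that supports mmap.
 */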
struct vduse_iova_domain *
vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
{
	struct vduse_iova_domain *domain;
	struct file *file;
	struct vduse_bounce_map *map;
	unsigned long pfn, bounce_pfns;
	int ret;

	bounce_pfns = PAGE_ALIGN(bounce_size) >> BOUNCE_MAP_SHIFT;
	if (iova_limit <= bounce_size)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->iotlb = vhost_iotlb_alloc(0, 0);
	if (!domain->iotlb)
		goto err_iotlb;

	domain->iova_limit = iova_limit;
	domain->bounce_size = PAGE_ALIGN(bounce_size);
	domain->bounce_maps = vzalloc(bounce_pfns *
				sizeof(struct vduse_bounce_map));
	if (!domain->bounce_maps)
		goto err_map;

	for (pfn = 0; pfn < bounce_pfns; pfn++) {
		map = &domain->bounce_maps[pfn];
		map->orig_phys = INVALID_PHYS_ADDR;
	}
	file = anon_inode_getfile("[vduse-domain]", &vduse_domain_fops,
				domain, O_RDWR);
	if (IS_ERR(file))
		goto err_file;

	domain->file = file;
	rwlock_init(&domain->bounce_lock);
	spin_lock_init(&domain->iotlb_lock);
	init_iova_domain(&domain->stream_iovad,
			BOUNCE_MAP_SIZE, IOVA_START_PFN);
	ret = iova_domain_init_rcaches(&domain->stream_iovad);
	if (ret)
		goto err_iovad_stream;
	init_iova_domain(&domain->consistent_iovad,
			PAGE_SIZE, bounce_pfns);
	ret = iova_domain_init_rcaches(&domain->consistent_iovad);
	if (ret)
		goto err_iovad_consistent;

	return domain;
err_iovad_consistent:
	put_iova_domain(&domain->stream_iovad);
err_iovad_stream:
	fput(file);
err_file:
	vfree(domain->bounce_maps);
err_map:
	vhost_iotlb_free(domain->iotlb);
err_iotlb:
	kfree(domain);
	return NULL;
}

int vduse_domain_init(void)
{
	return iova_cache_get();
}

void vduse_domain_exit(void)
{
	iova_cache_put();
}