/* xref: /linux/drivers/infiniband/core/umem_odp.c (revision 36c0f7f0f89984bb21e6d0f92d776faf7be73096) */
/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <linux/interval_tree_generic.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

/*
 * The ib_umem list keeps track of memory regions for which the HW
 * device requests to receive notification when the related memory
 * mapping is changed.
 *
 * ib_umem_lock protects the list.
 */

static u64 node_start(struct umem_odp_node *n)
{
	struct ib_umem_odp *umem_odp =
			container_of(n, struct ib_umem_odp, interval_tree);

	return ib_umem_start(&umem_odp->umem);
}

/* Note that the representation of the intervals in the interval tree
 * considers the ending point as contained in the interval, while the
 * function ib_umem_end returns the first address which is not contained
 * in the umem.
 */
static u64 node_last(struct umem_odp_node *n)
{
	struct ib_umem_odp *umem_odp =
			container_of(n, struct ib_umem_odp, interval_tree);

	return ib_umem_end(&umem_odp->umem) - 1;
}

INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
		     node_start, node_last, static, rbt_ib_umem)

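/*
 * Worked example of the inclusive representation above: a umem covering
 * the half-open range [0x1000, 0x3000) is stored in the tree as the
 * closed interval [0x1000, 0x2fff], which is why node_last() subtracts 1
 * from ib_umem_end(). INTERVAL_TREE_DEFINE() instantiates the static
 * helpers used throughout this file: rbt_ib_umem_insert(),
 * rbt_ib_umem_remove(), rbt_ib_umem_iter_first() and
 * rbt_ib_umem_iter_next(), all of which use the same closed
 * [start, last] convention.
 */
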
static void ib_umem_notifier_start_account(struct ib_umem_odp *umem_odp)
{
	mutex_lock(&umem_odp->umem_mutex);
	if (umem_odp->notifiers_count++ == 0)
		/*
		 * Initialize the completion object for waiting on
		 * notifiers. Since notifiers_count is zero, no one should be
		 * waiting right now.
		 */
		reinit_completion(&umem_odp->notifier_completion);
	mutex_unlock(&umem_odp->umem_mutex);
}

static void ib_umem_notifier_end_account(struct ib_umem_odp *umem_odp)
{
	mutex_lock(&umem_odp->umem_mutex);
	/*
	 * Incrementing the sequence number notifies a racing page-fault
	 * handler that the page it is about to map into the device page
	 * tables could have been freed.
	 */
	++umem_odp->notifiers_seq;
	if (--umem_odp->notifiers_count == 0)
		complete_all(&umem_odp->notifier_completion);
	mutex_unlock(&umem_odp->umem_mutex);
}

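/*
 * A hedged sketch of how a driver-side page-fault handler is expected to
 * cooperate with this accounting (variable names are illustrative): it
 * samples notifiers_seq before faulting pages in, and
 * ib_umem_mmu_notifier_retry() later reports whether an invalidation is
 * running (notifiers_count != 0) or completed (notifiers_seq changed)
 * in between:
 *
 *	unsigned long seq = READ_ONCE(umem_odp->notifiers_seq);
 *	smp_rmb();
 *	... get_user_pages() / DMA-map the pages ...
 *	mutex_lock(&umem_odp->umem_mutex);
 *	if (ib_umem_mmu_notifier_retry(umem_odp, seq))
 *		... drop the pages and retry the fault ...
 *	mutex_unlock(&umem_odp->umem_mutex);
 */
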
static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
					       u64 start, u64 end, void *cookie)
{
	struct ib_umem *umem = &umem_odp->umem;

	/*
	 * Increase the number of notifiers running, to
	 * prevent any further fault handling on this MR.
	 */
	ib_umem_notifier_start_account(umem_odp);
	umem_odp->dying = 1;
	/* Make sure the store to ->dying is visible before we release
	 * all pending page faults. */
	smp_wmb();
	complete_all(&umem_odp->notifier_completion);
	umem->context->invalidate_range(umem_odp, ib_umem_start(umem),
					ib_umem_end(umem));
	return 0;
}

static void ib_umem_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct ib_ucontext_per_mm *per_mm =
		container_of(mn, struct ib_ucontext_per_mm, mn);

	down_read(&per_mm->umem_rwsem);
	if (per_mm->active)
		rbt_ib_umem_for_each_in_range(
			&per_mm->umem_tree, 0, ULLONG_MAX,
			ib_umem_notifier_release_trampoline, true, NULL);
	up_read(&per_mm->umem_rwsem);
}

static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
					     u64 start, u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->umem.context->invalidate_range(item, start, end);
	return 0;
}

static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
				const struct mmu_notifier_range *range)
{
	struct ib_ucontext_per_mm *per_mm =
		container_of(mn, struct ib_ucontext_per_mm, mn);

	if (range->blockable)
		down_read(&per_mm->umem_rwsem);
	else if (!down_read_trylock(&per_mm->umem_rwsem))
		return -EAGAIN;

	if (!per_mm->active) {
		up_read(&per_mm->umem_rwsem);
		/*
		 * At this point 'active' is permanently false and visible to
		 * this CPU without holding the lock; range_end relies on that
		 * fact to skip the unlock.
		 */
		return 0;
	}

	return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
					     range->end,
					     invalidate_range_start_trampoline,
					     range->blockable, NULL);
}

static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
					   u64 end, void *cookie)
{
	ib_umem_notifier_end_account(item);
	return 0;
}

static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
				const struct mmu_notifier_range *range)
{
	struct ib_ucontext_per_mm *per_mm =
		container_of(mn, struct ib_ucontext_per_mm, mn);

	if (unlikely(!per_mm->active))
		return;

	rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
				      range->end,
				      invalidate_range_end_trampoline, true, NULL);
	up_read(&per_mm->umem_rwsem);
}

static const struct mmu_notifier_ops ib_umem_notifiers = {
	.release                    = ib_umem_notifier_release,
	.invalidate_range_start     = ib_umem_notifier_invalidate_range_start,
	.invalidate_range_end       = ib_umem_notifier_invalidate_range_end,
};

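/*
 * How the callbacks above are driven by the core mm (a summary, not new
 * behaviour): .release fires when the owning mm is torn down on process
 * exit, while .invalidate_range_start/_end bracket events such as
 * munmap(), page migration and CoW breaking. Note that umem_rwsem is
 * taken for read in range_start and only dropped in range_end, so the
 * interval tree cannot change while an invalidation is in flight.
 */
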
static void add_umem_to_per_mm(struct ib_umem_odp *umem_odp)
{
	struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
	struct ib_umem *umem = &umem_odp->umem;

	down_write(&per_mm->umem_rwsem);
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_insert(&umem_odp->interval_tree,
				   &per_mm->umem_tree);
	up_write(&per_mm->umem_rwsem);
}

static void remove_umem_from_per_mm(struct ib_umem_odp *umem_odp)
{
	struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
	struct ib_umem *umem = &umem_odp->umem;

	down_write(&per_mm->umem_rwsem);
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_remove(&umem_odp->interval_tree,
				   &per_mm->umem_tree);
	complete_all(&umem_odp->notifier_completion);

	up_write(&per_mm->umem_rwsem);
}

static struct ib_ucontext_per_mm *alloc_per_mm(struct ib_ucontext *ctx,
					       struct mm_struct *mm)
{
	struct ib_ucontext_per_mm *per_mm;
	int ret;

	per_mm = kzalloc(sizeof(*per_mm), GFP_KERNEL);
	if (!per_mm)
		return ERR_PTR(-ENOMEM);

	per_mm->context = ctx;
	per_mm->mm = mm;
	per_mm->umem_tree = RB_ROOT_CACHED;
	init_rwsem(&per_mm->umem_rwsem);
	per_mm->active = ctx->invalidate_range;

	rcu_read_lock();
	per_mm->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();

	WARN_ON(mm != current->mm);

	per_mm->mn.ops = &ib_umem_notifiers;
	ret = mmu_notifier_register(&per_mm->mn, per_mm->mm);
	if (ret) {
		dev_err(&ctx->device->dev,
			"Failed to register mmu_notifier %d\n", ret);
		goto out_pid;
	}

	list_add(&per_mm->ucontext_list, &ctx->per_mm_list);
	return per_mm;

out_pid:
	put_pid(per_mm->tgid);
	kfree(per_mm);
	return ERR_PTR(ret);
}

static int get_per_mm(struct ib_umem_odp *umem_odp)
{
	struct ib_ucontext *ctx = umem_odp->umem.context;
	struct ib_ucontext_per_mm *per_mm;

	/*
	 * Generally speaking we expect only one or two per_mm entries in this
	 * list, so no reason to optimize this search today.
	 */
	mutex_lock(&ctx->per_mm_list_lock);
	list_for_each_entry(per_mm, &ctx->per_mm_list, ucontext_list) {
		if (per_mm->mm == umem_odp->umem.owning_mm)
			goto found;
	}

	per_mm = alloc_per_mm(ctx, umem_odp->umem.owning_mm);
	if (IS_ERR(per_mm)) {
		mutex_unlock(&ctx->per_mm_list_lock);
		return PTR_ERR(per_mm);
	}

found:
	umem_odp->per_mm = per_mm;
	per_mm->odp_mrs_count++;
	mutex_unlock(&ctx->per_mm_list_lock);

	return 0;
}

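/*
 * Each get_per_mm() must be balanced by a put_per_mm(): odp_mrs_count is
 * the reference count tying the per_mm (and its mmu_notifier
 * registration) to the number of ODP umems using it, all manipulated
 * under per_mm_list_lock.
 */
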
static void free_per_mm(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct ib_ucontext_per_mm, rcu));
}

void put_per_mm(struct ib_umem_odp *umem_odp)
{
	struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
	struct ib_ucontext *ctx = umem_odp->umem.context;
	bool need_free;

	mutex_lock(&ctx->per_mm_list_lock);
	umem_odp->per_mm = NULL;
	per_mm->odp_mrs_count--;
	need_free = per_mm->odp_mrs_count == 0;
	if (need_free)
		list_del(&per_mm->ucontext_list);
	mutex_unlock(&ctx->per_mm_list_lock);

	if (!need_free)
		return;

	/*
	 * NOTE! mmu_notifier_unregister() can happen between a start/end
	 * callback, resulting in a missing range_end, and thus an unbalanced
	 * lock. This doesn't really matter to us since we are about to kfree
	 * the memory that holds the lock, however LOCKDEP doesn't like this.
	 */
	down_write(&per_mm->umem_rwsem);
	per_mm->active = false;
	up_write(&per_mm->umem_rwsem);

	WARN_ON(!RB_EMPTY_ROOT(&per_mm->umem_tree.rb_root));
	mmu_notifier_unregister_no_release(&per_mm->mn, per_mm->mm);
	put_pid(per_mm->tgid);
	mmu_notifier_call_srcu(&per_mm->rcu, free_per_mm);
}

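/*
 * The _no_release unregister variant skips invoking ->release, which is
 * safe because the umem tree is already empty here, and
 * mmu_notifier_call_srcu() defers the kfree() until the mmu-notifier
 * SRCU grace period has elapsed, so a notifier callback still running on
 * another CPU cannot touch freed memory.
 */
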
struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
				      unsigned long addr, size_t size)
{
	struct ib_ucontext *ctx = per_mm->context;
	struct ib_umem_odp *odp_data;
	struct ib_umem *umem;
	int pages = size >> PAGE_SHIFT;
	int ret;

	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
	if (!odp_data)
		return ERR_PTR(-ENOMEM);
	umem = &odp_data->umem;
	umem->context    = ctx;
	umem->length     = size;
	umem->address    = addr;
	umem->page_shift = PAGE_SHIFT;
	umem->writable   = 1;
	umem->is_odp = 1;
	odp_data->per_mm = per_mm;
	umem->owning_mm  = per_mm->mm;
	mmgrab(umem->owning_mm);

	mutex_init(&odp_data->umem_mutex);
	init_completion(&odp_data->notifier_completion);

	odp_data->page_list =
		vzalloc(array_size(pages, sizeof(*odp_data->page_list)));
	if (!odp_data->page_list) {
		ret = -ENOMEM;
		goto out_odp_data;
	}

	odp_data->dma_list =
		vzalloc(array_size(pages, sizeof(*odp_data->dma_list)));
	if (!odp_data->dma_list) {
		ret = -ENOMEM;
		goto out_page_list;
	}

	/*
	 * Caller must ensure that the umem_odp that the per_mm came from
	 * cannot be freed during the call to ib_alloc_odp_umem.
	 */
	mutex_lock(&ctx->per_mm_list_lock);
	per_mm->odp_mrs_count++;
	mutex_unlock(&ctx->per_mm_list_lock);
	add_umem_to_per_mm(odp_data);

	return odp_data;

out_page_list:
	vfree(odp_data->page_list);
out_odp_data:
	mmdrop(umem->owning_mm);
	kfree(odp_data);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_odp_umem);

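/*
 * Hedged usage sketch: this constructor exists for umems that have no
 * backing userspace registration of their own, e.g. the per-range child
 * umems an implicit-ODP implementation hangs off one large parent MR. A
 * caller (the size and names below are illustrative) might do:
 *
 *	struct ib_umem_odp *child;
 *
 *	child = ib_alloc_odp_umem(parent->per_mm, addr & ~(SZ_2M - 1),
 *				  SZ_2M);
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 */
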
int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
	struct ib_umem *umem = &umem_odp->umem;
	/*
	 * NOTE: This must be called in a process context where
	 * umem->owning_mm == current->mm
	 */
	struct mm_struct *mm = umem->owning_mm;
	int ret_val;

	if (access & IB_ACCESS_HUGETLB) {
		struct vm_area_struct *vma;
		struct hstate *h;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ib_umem_start(umem));
		if (!vma || !is_vm_hugetlb_page(vma)) {
			up_read(&mm->mmap_sem);
			return -EINVAL;
		}
		h = hstate_vma(vma);
		umem->page_shift = huge_page_shift(h);
		up_read(&mm->mmap_sem);
		umem->hugetlb = 1;
	} else {
		umem->hugetlb = 0;
	}

	mutex_init(&umem_odp->umem_mutex);

	init_completion(&umem_odp->notifier_completion);

	if (ib_umem_num_pages(umem)) {
		umem_odp->page_list =
			vzalloc(array_size(sizeof(*umem_odp->page_list),
					   ib_umem_num_pages(umem)));
		if (!umem_odp->page_list)
			return -ENOMEM;

		umem_odp->dma_list =
			vzalloc(array_size(sizeof(*umem_odp->dma_list),
					   ib_umem_num_pages(umem)));
		if (!umem_odp->dma_list) {
			ret_val = -ENOMEM;
			goto out_page_list;
		}
	}

	ret_val = get_per_mm(umem_odp);
	if (ret_val)
		goto out_dma_list;
	add_umem_to_per_mm(umem_odp);

	return 0;

out_dma_list:
	vfree(umem_odp->dma_list);
out_page_list:
	vfree(umem_odp->page_list);
	return ret_val;
}

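/*
 * For reference (the caller lives in umem.c, not here): ib_umem_get()
 * calls ib_umem_odp_get() instead of pinning pages up front when the
 * registration asks for IB_ACCESS_ON_DEMAND, so page_list/dma_list start
 * out empty and are only populated on demand by
 * ib_umem_odp_map_dma_pages() below.
 */
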
void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
	struct ib_umem *umem = &umem_odp->umem;

	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem),
				    ib_umem_end(umem));

	remove_umem_from_per_mm(umem_odp);
	put_per_mm(umem_odp);
	vfree(umem_odp->dma_list);
	vfree(umem_odp->page_list);
}

/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem_odp->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
		struct ib_umem_odp *umem_odp,
		int page_index,
		struct page *page,
		u64 access_mask,
		unsigned long current_seq)
{
	struct ib_umem *umem = &umem_odp->umem;
	struct ib_device *dev = umem->context->device;
	dma_addr_t dma_addr;
	int stored_page = 0;
	int remove_existing_mapping = 0;
	int ret = 0;

	/*
	 * Note: we avoid writing if seq is different from the initial seq, to
	 * handle the case of a racing notifier. This check also allows us to
	 * bail early if we have a notifier running in parallel with us.
	 */
	if (ib_umem_mmu_notifier_retry(umem_odp, current_seq)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!(umem_odp->dma_list[page_index])) {
		dma_addr = ib_dma_map_page(dev,
					   page,
					   0, BIT(umem->page_shift),
					   DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(dev, dma_addr)) {
			ret = -EFAULT;
			goto out;
		}
		umem_odp->dma_list[page_index] = dma_addr | access_mask;
		umem_odp->page_list[page_index] = page;
		umem->npages++;
		stored_page = 1;
	} else if (umem_odp->page_list[page_index] == page) {
		umem_odp->dma_list[page_index] |= access_mask;
	} else {
		pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
		       umem_odp->page_list[page_index], page);
		/* Better remove the mapping now, to prevent any further
		 * damage. */
		remove_existing_mapping = 1;
	}

out:
	/* On Demand Paging - avoid pinning the page */
	if (umem->context->invalidate_range || !stored_page)
		put_page(page);

	if (remove_existing_mapping && umem->context->invalidate_range) {
		ib_umem_notifier_start_account(umem_odp);
		umem->context->invalidate_range(
			umem_odp,
			ib_umem_start(umem) + (page_index << umem->page_shift),
			ib_umem_start(umem) +
				((page_index + 1) << umem->page_shift));
		ib_umem_notifier_end_account(umem_odp);
		ret = -EAGAIN;
	}

	return ret;
}

/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages are updated in
 * umem_odp->dma_list.
 *
 * Returns the number of pages mapped on success, or a negative error code
 * on failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 * An -ENOENT error code indicates that the userspace process is being
 * terminated and the mm was already destroyed.
 * @umem_odp: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem_odp->notifiers_seq before calling this function.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
			      u64 bcnt, u64 access_mask,
			      unsigned long current_seq)
{
	struct ib_umem *umem = &umem_odp->umem;
	struct task_struct *owning_process  = NULL;
	struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
	struct page       **local_page_list = NULL;
	u64 page_mask, off;
	int j, k, ret = 0, start_idx, npages = 0, page_shift;
	unsigned int flags = 0;
	phys_addr_t p = 0;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem) ||
	    user_virt + bcnt > ib_umem_end(umem))
		return -EFAULT;

	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
	if (!local_page_list)
		return -ENOMEM;

	page_shift = umem->page_shift;
	page_mask = ~(BIT(page_shift) - 1);
	off = user_virt & (~page_mask);
	user_virt = user_virt & page_mask;
	bcnt += off; /* Charge for the first page offset as well. */

	/*
	 * owning_process is allowed to be NULL; this means the mm has somehow
	 * outlived the originating process. Presumably mmget_not_zero() will
	 * fail in this case.
	 */
	owning_process = get_pid_task(umem_odp->per_mm->tgid, PIDTYPE_PID);
	if (WARN_ON(!mmget_not_zero(umem_odp->umem.owning_mm))) {
		ret = -EINVAL;
		goto out_put_task;
	}

	if (access_mask & ODP_WRITE_ALLOWED_BIT)
		flags |= FOLL_WRITE;

	start_idx = (user_virt - ib_umem_start(umem)) >> page_shift;
	k = start_idx;

	while (bcnt > 0) {
		const size_t gup_num_pages = min_t(size_t,
				(bcnt + BIT(page_shift) - 1) >> page_shift,
				PAGE_SIZE / sizeof(struct page *));

		down_read(&owning_mm->mmap_sem);
		/*
		 * Note: this might result in redundant page getting. We can
		 * avoid this by checking dma_list for already-mapped entries
		 * before calling get_user_pages. However, this would make the
		 * code much more complex (and doesn't gain us much
		 * performance in most use cases).
		 */
		npages = get_user_pages_remote(owning_process, owning_mm,
				user_virt, gup_num_pages,
				flags, local_page_list, NULL, NULL);
		up_read(&owning_mm->mmap_sem);

		if (npages < 0) {
			if (npages != -EAGAIN)
				pr_warn("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
			else
				pr_debug("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
			break;
		}

		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
		mutex_lock(&umem_odp->umem_mutex);
		for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
			if (user_virt & ~page_mask) {
				p += PAGE_SIZE;
				if (page_to_phys(local_page_list[j]) != p) {
					ret = -EFAULT;
					break;
				}
				put_page(local_page_list[j]);
				continue;
			}

			ret = ib_umem_odp_map_dma_single_page(
					umem_odp, k, local_page_list[j],
					access_mask, current_seq);
			if (ret < 0) {
				if (ret != -EAGAIN)
					pr_warn("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
				else
					pr_debug("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
				break;
			}

			p = page_to_phys(local_page_list[j]);
			k++;
		}
		mutex_unlock(&umem_odp->umem_mutex);

		if (ret < 0) {
			/* Release left over pages when handling errors. */
			for (++j; j < npages; ++j)
				put_page(local_page_list[j]);
			break;
		}
	}

	if (ret >= 0) {
		if (npages < 0 && k == start_idx)
			ret = npages;
		else
			ret = k - start_idx;
	}

	mmput(owning_mm);
out_put_task:
	if (owning_process)
		put_task_struct(owning_process);
	free_page((unsigned long)local_page_list);
	return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);

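/*
 * A hedged sketch of how a driver page-fault path would drive the
 * function above (names other than the exported symbols are
 * illustrative); on -EAGAIN the fault is retried with a freshly sampled
 * sequence number, and on success the driver updates its own HW
 * translation tables from dma_list:
 *
 *	int npages;
 *	unsigned long seq;
 *
 *	do {
 *		seq = READ_ONCE(umem_odp->notifiers_seq);
 *		smp_rmb();
 *		npages = ib_umem_odp_map_dma_pages(umem_odp, io_virt,
 *						   bcnt, access_mask, seq);
 *	} while (npages == -EAGAIN);
 */
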
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
				 u64 bound)
{
	struct ib_umem *umem = &umem_odp->umem;
	int idx;
	u64 addr;
	struct ib_device *dev = umem->context->device;

	virt  = max_t(u64, virt,  ib_umem_start(umem));
	bound = min_t(u64, bound, ib_umem_end(umem));
	/* Note that during the run of this function, the
	 * notifiers_count of the MR is > 0, preventing any racing
	 * faults from completing. We might be racing with other
	 * invalidations, so we must make sure we free each page only
	 * once. */
	mutex_lock(&umem_odp->umem_mutex);
	for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
		idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
		if (umem_odp->page_list[idx]) {
			struct page *page = umem_odp->page_list[idx];
			dma_addr_t dma = umem_odp->dma_list[idx];
			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

			WARN_ON(!dma_addr);

			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
					  DMA_BIDIRECTIONAL);
			if (dma & ODP_WRITE_ALLOWED_BIT) {
				struct page *head_page = compound_head(page);
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			}
			/* on demand pinning support */
			if (!umem->context->invalidate_range)
				put_page(page);
			umem_odp->page_list[idx] = NULL;
			umem_odp->dma_list[idx] = 0;
			umem->npages--;
		}
	}
	mutex_unlock(&umem_odp->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);

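/*
 * Hedged sketch of the expected caller: a driver's ->invalidate_range()
 * callback first makes the HW stop translating the range (e.g. by
 * zapping its device page-table entries), then releases the CPU-side
 * mappings (my_invalidate_range is an illustrative name, not an API):
 *
 *	static void my_invalidate_range(struct ib_umem_odp *umem_odp,
 *					unsigned long start,
 *					unsigned long end)
 *	{
 *		... zap the device page-table entries for [start, end) ...
 *		ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
 *	}
 */
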
/* @last is not a part of the interval. See comment for function
 * node_last.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
				  u64 start, u64 last,
				  umem_call_back cb,
				  bool blockable,
				  void *cookie)
{
	int ret_val = 0;
	struct umem_odp_node *node, *next;
	struct ib_umem_odp *umem;

	if (unlikely(start == last))
		return ret_val;

	for (node = rbt_ib_umem_iter_first(root, start, last - 1);
			node; node = next) {
		/* TODO move the blockable decision up to the callback */
		if (!blockable)
			return -EAGAIN;
		next = rbt_ib_umem_iter_next(node, start, last - 1);
		umem = container_of(node, struct ib_umem_odp, interval_tree);
		ret_val = cb(umem, start, last, cookie) || ret_val;
	}

	return ret_val;
}
EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);

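/*
 * Note on the iteration above: 'next' is fetched before invoking the
 * callback, so a callback may safely remove the current node from the
 * tree without breaking the walk. The mmu-notifier paths earlier in this
 * file are the typical callers.
 */
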
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
				       u64 addr, u64 length)
{
	struct umem_odp_node *node;

	node = rbt_ib_umem_iter_first(root, addr, addr + length - 1);
	if (node)
		return container_of(node, struct ib_umem_odp, interval_tree);
	return NULL;
}
EXPORT_SYMBOL(rbt_ib_umem_lookup);
804