/*
 * Copyright(c) 2015-2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <asm/page.h>
#include <linux/string.h>

#include "mmu_rb.h"
#include "user_exp_rcv.h"
#include "trace.h"

static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd);
static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode);
static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
			      const struct mmu_notifier_range *range,
			      unsigned long cur_seq);
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped);
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
			      struct tid_group **grp);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);

static const struct mmu_interval_notifier_ops tid_mn_ops = {
	.invalidate = tid_rb_invalidate,
};

/*
 * Initialize context and file private data needed for Expected
 * receive caching. This needs to be done after the context has
 * been configured with the eager/expected RcvEntry counts.
 */
int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt)
{
	int ret = 0;

	fd->entry_to_rb = kcalloc(uctxt->expected_count,
				  sizeof(struct rb_node *),
				  GFP_KERNEL);
	if (!fd->entry_to_rb)
		return -ENOMEM;

	if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) {
		fd->invalid_tid_idx = 0;
		fd->invalid_tids = kcalloc(uctxt->expected_count,
					   sizeof(*fd->invalid_tids),
					   GFP_KERNEL);
		if (!fd->invalid_tids) {
			kfree(fd->entry_to_rb);
			fd->entry_to_rb = NULL;
			return -ENOMEM;
		}
		fd->use_mn = true;
	}

	/*
	 * PSM does not have a good way to separate, count, and
	 * effectively enforce a limit on RcvArray entries used by
	 * subctxts (when context sharing is used) when TID caching
	 * is enabled. To help with that, we calculate a per-process
	 * RcvArray entry share and enforce that.
	 * If TID caching is not in use, PSM deals with usage on its
	 * own. In that case, we allow any subctxt to take all of the
	 * entries.
	 *
	 * Make sure that we set the tid counts only after successful
	 * init.
	 */
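	/*
	 * Worked example (hypothetical numbers): with expected_count = 2048
	 * and subctxt_cnt = 3, each subctxt starts with 2048 / 3 = 682
	 * entries and remainder = 2, so subctxts 0 and 1 each get one
	 * extra entry: 683, 683, and 682 entries respectively.
	 */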
	spin_lock(&fd->tid_lock);
	if (uctxt->subctxt_cnt && fd->use_mn) {
		u16 remainder;

		fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
		remainder = uctxt->expected_count % uctxt->subctxt_cnt;
		if (remainder && fd->subctxt < remainder)
			fd->tid_limit++;
	} else {
		fd->tid_limit = uctxt->expected_count;
	}
	spin_unlock(&fd->tid_lock);

	return ret;
}

void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	mutex_lock(&uctxt->exp_mutex);
	if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
		unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
	if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
		unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
	mutex_unlock(&uctxt->exp_mutex);

	kfree(fd->invalid_tids);
	fd->invalid_tids = NULL;

	kfree(fd->entry_to_rb);
	fd->entry_to_rb = NULL;
}

/**
 * unpin_rcv_pages - Release pinned receive buffer pages
 * @fd: file data for this context
 * @tidbuf: the user buffer the pages belong to (unmapped case)
 * @node: the RcvArray entry node the pages belong to (mapped case)
 * @idx: index of the first page to unpin
 * @npages: number of pages to unpin
 * @mapped: true if the pages have been DMA mapped, false otherwise
 *
 * If the pages have been DMA mapped (indicated by the mapped parameter),
 * their info will be passed via a struct tid_rb_node. If they haven't been
 * mapped, their info will be passed via a struct tid_user_buf.
 */
static void unpin_rcv_pages(struct hfi1_filedata *fd,
			    struct tid_user_buf *tidbuf,
			    struct tid_rb_node *node,
			    unsigned int idx,
			    unsigned int npages,
			    bool mapped)
{
	struct page **pages;
	struct hfi1_devdata *dd = fd->uctxt->dd;

	if (mapped) {
		pci_unmap_single(dd->pcidev, node->dma_addr,
				 node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
		pages = &node->pages[idx];
	} else {
		pages = &tidbuf->pages[idx];
	}
	hfi1_release_user_pages(fd->mm, pages, npages, mapped);
	fd->tid_n_pinned -= npages;
}

/**
 * pin_rcv_pages - Pin receive buffer pages
 */
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
	int pinned;
	unsigned int npages;
	unsigned long vaddr = tidbuf->vaddr;
	struct page **pages = NULL;
	struct hfi1_devdata *dd = fd->uctxt->dd;

	/* Get the number of pages the user buffer spans */
	npages = num_user_pages(vaddr, tidbuf->length);
	if (!npages)
		return -EINVAL;

	if (npages > fd->uctxt->expected_count) {
		dd_dev_err(dd, "Expected buffer too big\n");
		return -EINVAL;
	}

	/* Verify that access is OK for the user buffer */
	if (!access_ok((void __user *)vaddr,
		       npages * PAGE_SIZE)) {
		dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
			   (void *)vaddr, npages);
		return -EFAULT;
	}
	/* Allocate the array of struct page pointers needed for pinning */
	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/*
	 * Pin all the pages of the user buffer. If we can't pin all the
	 * pages, accept the amount pinned so far and program only that.
	 * User space knows how to deal with partially programmed buffers.
	 */
	if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
		kfree(pages);
		return -ENOMEM;
	}

	pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
	if (pinned <= 0) {
		kfree(pages);
		return pinned;
	}
	tidbuf->pages = pages;
	tidbuf->npages = npages;
	fd->tid_n_pinned += pinned;
	return pinned;
}

/*
 * RcvArray entry allocation for Expected Receives is done by the
 * following algorithm:
 *
 * The context keeps 3 lists of groups of RcvArray entries:
 *   1. List of empty groups - tid_group_list
 *      This list is created during user context creation and
 *      contains elements which describe sets (of 8) of empty
 *      RcvArray entries.
 *   2. List of partially used groups - tid_used_list
 *      This list contains sets of RcvArray entries which are
 *      not completely used up. Another mapping request could
 *      use some or all of the remaining entries.
 *   3. List of full groups - tid_full_list
 *      This is the list where sets that are completely used
 *      up go.
 *
 * An attempt to optimize the usage of RcvArray entries is
 * made by finding all sets of physically contiguous pages in a
 * user's buffer.
 * These physically contiguous sets are further split into
 * sizes supported by the receive engine of the HFI. The
 * resulting sets of pages are stored in struct tid_pageset,
 * which describes the sets as:
 *    * .count - number of pages in this set
 *    * .idx - starting index into struct page ** array
 *                    of this set
 *
 * From this point on, the algorithm deals with the page sets
 * described above. The number of pagesets is divided by the
 * RcvArray group size to produce the number of full groups
 * needed.
 *
 * Groups from the 3 lists are manipulated using the following
 * rules:
 *   1. For each set of 8 pagesets, a complete group from
 *      tid_group_list is taken, programmed, and moved to
 *      the tid_full_list list.
 *   2. For all remaining pagesets:
 *      2.1 If the tid_used_list is empty and the tid_group_list
 *          is empty, stop processing pagesets and return only
 *          what has been programmed up to this point.
 *      2.2 If the tid_used_list is empty and the tid_group_list
 *          is not empty, move a group from tid_group_list to
 *          tid_used_list.
 *      2.3 For each group in tid_used_list, program as much as
 *          can fit into the group. If the group becomes fully
 *          used, move it to tid_full_list.
 */
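/*
 * Worked example (hypothetical numbers): a buffer that resolves to 19
 * pagesets with a group size of 8 yields 19 / 8 = 2 full groups. Two
 * groups are popped from tid_group_list, programmed, and moved to
 * tid_full_list. The remaining 3 pagesets are then placed in a group
 * from tid_used_list (pulling one over from tid_group_list first if
 * the used list is empty).
 */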
int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0, need_group = 0, pinned;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int ngroups, pageidx = 0, pageset_count,
		tididx = 0, mapped, mapped_pages = 0;
	u32 *tidlist = NULL;
	struct tid_user_buf *tidbuf;

	if (!PAGE_ALIGNED(tinfo->vaddr))
		return -EINVAL;

	tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
	if (!tidbuf)
		return -ENOMEM;

	tidbuf->vaddr = tinfo->vaddr;
	tidbuf->length = tinfo->length;
	tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
				GFP_KERNEL);
	if (!tidbuf->psets) {
		kfree(tidbuf);
		return -ENOMEM;
	}

	pinned = pin_rcv_pages(fd, tidbuf);
	if (pinned <= 0) {
		kfree(tidbuf->psets);
		kfree(tidbuf);
		return pinned;
	}

	/* Find sets of physically contiguous pages */
	tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);

	/*
	 * We don't need to access this under a lock since tid_used is per
	 * process and the same process cannot be in hfi1_user_exp_rcv_clear()
	 * and hfi1_user_exp_rcv_setup() at the same time.
	 */
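	/*
	 * Clamp the request to the caller's remaining share of RcvArray
	 * entries. For example (hypothetical numbers), with tid_limit = 64,
	 * tid_used = 60, and n_psets = 10, only 64 - 60 = 4 pagesets are
	 * programmed; user space is expected to handle the partially
	 * programmed buffer.
	 */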
	spin_lock(&fd->tid_lock);
	if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
		pageset_count = fd->tid_limit - fd->tid_used;
	else
		pageset_count = tidbuf->n_psets;
	spin_unlock(&fd->tid_lock);

	if (!pageset_count)
		goto bail;

	ngroups = pageset_count / dd->rcv_entries.group_size;
	tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
	if (!tidlist) {
		ret = -ENOMEM;
		goto nomem;
	}

	tididx = 0;

	/*
	 * From this point on, we are going to be using shared (between master
	 * and subcontexts) context resources. We need to take the lock.
	 */
	mutex_lock(&uctxt->exp_mutex);
	/*
	 * The first step is to program the RcvArray entries which are complete
	 * groups.
	 */
	while (ngroups && uctxt->tid_group_list.count) {
		struct tid_group *grp =
			tid_group_pop(&uctxt->tid_group_list);

		ret = program_rcvarray(fd, tidbuf, grp,
				       pageidx, dd->rcv_entries.group_size,
				       tidlist, &tididx, &mapped);
		/*
		 * If there was a failure to program the RcvArray
		 * entries for the entire group, reset the grp fields
		 * and add the grp back to the free group list.
		 */
		if (ret <= 0) {
			tid_group_add_tail(grp, &uctxt->tid_group_list);
			hfi1_cdbg(TID,
				  "Failed to program RcvArray group %d", ret);
			goto unlock;
		}

		tid_group_add_tail(grp, &uctxt->tid_full_list);
		ngroups--;
		pageidx += ret;
		mapped_pages += mapped;
	}

	while (pageidx < pageset_count) {
		struct tid_group *grp, *ptr;
		/*
		 * If we don't have any partially used tid groups, check
		 * if we have empty groups. If so, take one from there and
		 * put in the partially used list.
		 */
		if (!uctxt->tid_used_list.count || need_group) {
			if (!uctxt->tid_group_list.count)
				goto unlock;

			grp = tid_group_pop(&uctxt->tid_group_list);
			tid_group_add_tail(grp, &uctxt->tid_used_list);
			need_group = 0;
		}
		/*
		 * There is an optimization opportunity here - instead of
		 * fitting as many page sets as we can, check for a group
		 * later on in the list that could fit all of them.
		 */
		list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
					 list) {
			unsigned use = min_t(unsigned, pageset_count - pageidx,
					     grp->size - grp->used);

			ret = program_rcvarray(fd, tidbuf, grp,
					       pageidx, use, tidlist,
					       &tididx, &mapped);
			if (ret < 0) {
				hfi1_cdbg(TID,
					  "Failed to program RcvArray entries %d",
					  ret);
				goto unlock;
			} else if (ret > 0) {
				if (grp->used == grp->size)
					tid_group_move(grp,
						       &uctxt->tid_used_list,
						       &uctxt->tid_full_list);
				pageidx += ret;
				mapped_pages += mapped;
				need_group = 0;
				/* Check if we are done so we break out early */
				if (pageidx >= pageset_count)
					break;
			} else if (WARN_ON(ret == 0)) {
				/*
				 * If ret is 0, we did not program any entries
				 * into this group, which can only happen if
				 * we've screwed up the accounting somewhere.
				 * Warn and try to continue.
				 */
				need_group = 1;
			}
		}
	}
unlock:
	mutex_unlock(&uctxt->exp_mutex);
nomem:
	hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
		  mapped_pages, ret);
	if (tididx) {
		spin_lock(&fd->tid_lock);
		fd->tid_used += tididx;
		spin_unlock(&fd->tid_lock);
		tinfo->tidcnt = tididx;
		tinfo->length = mapped_pages * PAGE_SIZE;

		if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
				 tidlist, sizeof(tidlist[0]) * tididx)) {
			/*
			 * On failure to copy to the user level, we need to undo
			 * everything done so far so we don't leak resources.
			 */
			tinfo->tidlist = (unsigned long)&tidlist;
			hfi1_user_exp_rcv_clear(fd, tinfo);
			tinfo->tidlist = 0;
			ret = -EFAULT;
			goto bail;
		}
	}

	/*
	 * If not everything was mapped (due to insufficient RcvArray entries,
	 * for example), unpin all unmapped pages so we can pin them next time.
	 */
	if (mapped_pages != pinned)
		unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
				(pinned - mapped_pages), false);
bail:
	kfree(tidbuf->psets);
	kfree(tidlist);
	kfree(tidbuf->pages);
	kfree(tidbuf);
	return ret > 0 ? 0 : ret;
}

int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	u32 *tidinfo;
	unsigned tididx;

	if (unlikely(tinfo->tidcnt > fd->tid_used))
		return -EINVAL;

	tidinfo = memdup_user(u64_to_user_ptr(tinfo->tidlist),
			      sizeof(tidinfo[0]) * tinfo->tidcnt);
	if (IS_ERR(tidinfo))
		return PTR_ERR(tidinfo);

	mutex_lock(&uctxt->exp_mutex);
	for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
		ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
		if (ret) {
			hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
				  ret);
			break;
		}
	}
	spin_lock(&fd->tid_lock);
	fd->tid_used -= tididx;
	spin_unlock(&fd->tid_lock);
	tinfo->tidcnt = tididx;
	mutex_unlock(&uctxt->exp_mutex);

	kfree(tidinfo);
	return ret;
}

int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
			      struct hfi1_tid_info *tinfo)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	unsigned long *ev = uctxt->dd->events +
		(uctxt_offset(uctxt) + fd->subctxt);
	u32 *array;
	int ret = 0;

	/*
	 * copy_to_user() can sleep, which will leave the invalid_lock
	 * locked and cause the MMU notifier to be blocked on the lock
	 * for a long time.
	 * Copy the data to a local buffer so we can release the lock.
	 */
	array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	spin_lock(&fd->invalid_lock);
	if (fd->invalid_tid_idx) {
		memcpy(array, fd->invalid_tids, sizeof(*array) *
		       fd->invalid_tid_idx);
		memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
		       fd->invalid_tid_idx);
		tinfo->tidcnt = fd->invalid_tid_idx;
		fd->invalid_tid_idx = 0;
		/*
		 * Reset the user flag while still holding the lock.
		 * Otherwise, PSM can miss events.
		 */
		clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
	} else {
		tinfo->tidcnt = 0;
	}
	spin_unlock(&fd->invalid_lock);

	if (tinfo->tidcnt) {
		if (copy_to_user((void __user *)tinfo->tidlist,
				 array, sizeof(*array) * tinfo->tidcnt))
			ret = -EFAULT;
	}
	kfree(array);

	return ret;
}

static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)
{
	unsigned pagecount, pageidx, setcount = 0, i;
	unsigned long pfn, this_pfn;
	struct page **pages = tidbuf->pages;
	struct tid_pageset *list = tidbuf->psets;

	if (!npages)
		return 0;

	/*
	 * Look for sets of physically contiguous pages in the user buffer.
	 * This will allow us to optimize Expected RcvArray entry usage by
	 * using the bigger supported sizes.
	 */
	pfn = page_to_pfn(pages[0]);
	for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
		this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;

		/*
		 * If the PFNs are not sequential, the pages are not
		 * physically contiguous.
		 */
		if (this_pfn != ++pfn) {
			/*
			 * At this point we have to loop over the set of
			 * physically contiguous pages and break them down
			 * into sizes supported by the HW.
			 * There are two main constraints:
			 *     1. The max buffer size is MAX_EXPECTED_BUFFER.
			 *        If the total set size is bigger than that
			 *        program only a MAX_EXPECTED_BUFFER chunk.
			 *     2. The buffer size has to be a power of two. If
			 *        it is not, round down to the closest power
			 *        of 2 and program that size.
			 */
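			/*
			 * Worked example (hypothetical numbers, 4 KiB
			 * pages): a run of 7 contiguous pages is split
			 * into power-of-two sets of 4, 2, and 1 pages,
			 * consuming three RcvArray entries instead of
			 * seven.
			 */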
			while (pagecount) {
				int maxpages = pagecount;
				u32 bufsize = pagecount * PAGE_SIZE;

				if (bufsize > MAX_EXPECTED_BUFFER)
					maxpages =
						MAX_EXPECTED_BUFFER >>
						PAGE_SHIFT;
				else if (!is_power_of_2(bufsize))
					maxpages =
						rounddown_pow_of_two(bufsize) >>
						PAGE_SHIFT;

				list[setcount].idx = pageidx;
				list[setcount].count = maxpages;
				pagecount -= maxpages;
				pageidx += maxpages;
				setcount++;
			}
			pageidx = i;
			pagecount = 1;
			pfn = this_pfn;
		} else {
			pagecount++;
		}
	}
	return setcount;
}

/**
 * program_rcvarray() - program an RcvArray group with receive buffers
 * @fd: filedata pointer
 * @tbuf: pointer to struct tid_user_buf that has the user buffer starting
 *	  virtual address, buffer length, page pointers, pagesets (array of
 *	  struct tid_pageset holding information on physically contiguous
 *	  chunks from the user buffer), and other fields.
 * @grp: RcvArray group
 * @start: starting index into sets array
 * @count: number of struct tid_pageset's to program
 * @tidlist: the array of u32 elements where the information about the
 *           programmed RcvArray entries is to be encoded.
 * @tididx: starting offset into tidlist
 * @pmapped: (output parameter) number of pages programmed into the RcvArray
 *           entries.
 *
 * This function will program up to 'count' number of RcvArray entries from the
 * group 'grp'. To make best use of write-combining writes, the function will
 * perform writes to the unused RcvArray entries which will be ignored by the
 * HW. Each RcvArray entry will be programmed with a physically contiguous
 * buffer chunk from the user's virtual buffer.
 *
 * Return:
 * -EINVAL if the requested count is larger than the size of the group,
 * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
 * number of RcvArray entries programmed.
 */
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	u16 idx;
	u32 tidinfo = 0, rcventry, useidx = 0;
	int mapped = 0;

	/* Count should never be larger than the group size */
	if (count > grp->size)
		return -EINVAL;

	/* Find the first unused entry in the group */
	for (idx = 0; idx < grp->size; idx++) {
		if (!(grp->map & (1 << idx))) {
			useidx = idx;
			break;
		}
		rcv_array_wc_fill(dd, grp->base + idx);
	}

	idx = 0;
	while (idx < count) {
		u16 npages, pageidx, setidx = start + idx;
		int ret = 0;

		/*
		 * If this entry in the group is used, move to the next one.
		 * If we go past the end of the group, exit the loop.
		 */
		if (useidx >= grp->size) {
			break;
		} else if (grp->map & (1 << useidx)) {
			rcv_array_wc_fill(dd, grp->base + useidx);
			useidx++;
			continue;
		}

		rcventry = grp->base + useidx;
		npages = tbuf->psets[setidx].count;
		pageidx = tbuf->psets[setidx].idx;

		ret = set_rcvarray_entry(fd, tbuf,
					 rcventry, grp, pageidx,
					 npages);
		if (ret)
			return ret;
		mapped += npages;

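		/*
		 * Encode this entry for user space: each u32 in tidlist
		 * packs the entry's TID control/index pair (via
		 * rcventry2tidinfo()) together with its length in pages
		 * (EXP_TID_SET(LEN, ...)). User space hands these same
		 * values back to hfi1_user_exp_rcv_clear() to release
		 * the entries.
		 */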
		tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
			EXP_TID_SET(LEN, npages);
		tidlist[(*tididx)++] = tidinfo;
		grp->used++;
		grp->map |= 1 << useidx++;
		idx++;
	}

	/* Fill the rest of the group with "blank" writes */
	for (; useidx < grp->size; useidx++)
		rcv_array_wc_fill(dd, grp->base + useidx);
	*pmapped = mapped;
	return idx;
}

static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages)
{
	int ret;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct tid_rb_node *node;
	struct hfi1_devdata *dd = uctxt->dd;
	dma_addr_t phys;
	struct page **pages = tbuf->pages + pageidx;

	/*
	 * Allocate the node first so we can handle a potential
	 * failure before we've programmed anything.
	 */
	node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
		       GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	phys = pci_map_single(dd->pcidev,
			      __va(page_to_phys(pages[0])),
			      npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, phys)) {
		dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
			   phys);
		kfree(node);
		return -EFAULT;
	}

	node->fdata = fd;
	node->phys = page_to_phys(pages[0]);
	node->npages = npages;
	node->rcventry = rcventry;
	node->dma_addr = phys;
	node->grp = grp;
	node->freed = false;
	memcpy(node->pages, pages, sizeof(struct page *) * npages);

	if (fd->use_mn) {
		ret = mmu_interval_notifier_insert(
			&node->notifier, fd->mm,
			tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
			&tid_mn_ops);
		if (ret)
			goto out_unmap;
		/*
		 * FIXME: This is in the wrong order, the notifier should be
		 * established before the pages are pinned by pin_rcv_pages.
		 */
		mmu_interval_read_begin(&node->notifier);
	}
	fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;

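	/*
	 * Program the hardware entry. npages is always a power of two here
	 * (find_phys_blocks() rounds every set down to a power of two), so
	 * the ilog2()-based size encoding below is exact.
	 */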
	hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
	trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
			       node->notifier.interval_tree.start, node->phys,
			       phys);
	return 0;

out_unmap:
	hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
		  node->rcventry, node->notifier.interval_tree.start,
		  node->phys, ret);
	pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
			 PCI_DMA_FROMDEVICE);
	kfree(node);
	return -EFAULT;
}

static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
			      struct tid_group **grp)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	struct tid_rb_node *node;
	u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
	u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;

	if (tididx >= uctxt->expected_count) {
		dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
			   tididx, uctxt->ctxt);
		return -EINVAL;
	}

	if (tidctrl == 0x3)
		return -EINVAL;

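	/*
	 * tididx addresses a pair of RcvArray entries; tidctrl selects one
	 * entry within that pair: 1 for the even entry, 2 for the odd one.
	 * The value 3 (both bits set) is not a valid single-entry selector
	 * and was rejected above.
	 */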
	rcventry = tididx + (tidctrl - 1);

	node = fd->entry_to_rb[rcventry];
	if (!node || node->rcventry != (uctxt->expected_base + rcventry))
		return -EBADF;

	if (grp)
		*grp = node->grp;

	if (fd->use_mn)
		mmu_interval_notifier_remove(&node->notifier);
	cacheless_tid_rb_remove(fd, node);

	return 0;
}

static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;

	trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
				 node->npages,
				 node->notifier.interval_tree.start, node->phys,
				 node->dma_addr);

	/*
	 * Make sure device has seen the write before we unpin the
	 * pages.
	 */
	hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);

	unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);

	node->grp->used--;
	node->grp->map &= ~(1 << (node->rcventry - node->grp->base));

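	/*
	 * Keep the group lists consistent: if this release took the group
	 * from full to partially used (used is now size - 1), move it back
	 * to tid_used_list; if the group is now empty, return it to
	 * tid_group_list.
	 */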
	if (node->grp->used == node->grp->size - 1)
		tid_group_move(node->grp, &uctxt->tid_full_list,
			       &uctxt->tid_used_list);
	else if (!node->grp->used)
		tid_group_move(node->grp, &uctxt->tid_used_list,
			       &uctxt->tid_group_list);
	kfree(node);
}

/*
 * As a simple helper for hfi1_user_exp_rcv_free, this function deals with
 * clearing nodes in the non-cached case.
 */
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd)
{
	struct tid_group *grp, *ptr;
	int i;

	list_for_each_entry_safe(grp, ptr, &set->list, list) {
		list_del_init(&grp->list);

		for (i = 0; i < grp->size; i++) {
			if (grp->map & (1 << i)) {
				u16 rcventry = grp->base + i;
				struct tid_rb_node *node;

				node = fd->entry_to_rb[rcventry -
							  uctxt->expected_base];
				if (!node || node->rcventry != rcventry)
					continue;

				if (fd->use_mn)
					mmu_interval_notifier_remove(
						&node->notifier);
				cacheless_tid_rb_remove(fd, node);
			}
		}
	}
}

static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
			      const struct mmu_notifier_range *range,
			      unsigned long cur_seq)
{
	struct tid_rb_node *node =
		container_of(mni, struct tid_rb_node, notifier);
	struct hfi1_filedata *fdata = node->fdata;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;

	if (node->freed)
		return true;

	trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt,
				 node->notifier.interval_tree.start,
				 node->rcventry, node->npages, node->dma_addr);
	node->freed = true;

	spin_lock(&fdata->invalid_lock);
	if (fdata->invalid_tid_idx < uctxt->expected_count) {
		fdata->invalid_tids[fdata->invalid_tid_idx] =
			rcventry2tidinfo(node->rcventry - uctxt->expected_base);
		fdata->invalid_tids[fdata->invalid_tid_idx] |=
			EXP_TID_SET(LEN, node->npages);
		if (!fdata->invalid_tid_idx) {
			unsigned long *ev;

			/*
			 * hfi1_set_uevent_bits() sets a user event flag
			 * for all processes. Because calling into the
			 * driver to process TID cache invalidations is
			 * expensive and TID cache invalidations are
			 * handled on a per-process basis, we can
			 * optimize this to set the flag only for the
			 * process in question.
			 */
			ev = uctxt->dd->events +
				(uctxt_offset(uctxt) + fdata->subctxt);
			set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
		}
		fdata->invalid_tid_idx++;
	}
	spin_unlock(&fdata->invalid_lock);
	return true;
}

static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode)
{
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = NULL;
	clear_tid_node(fdata, tnode);
}