// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/libnvdimm.h>

#include "rxe.h"
#include "rxe_loc.h"

/* Return a random 8 bit key value that is
 * different from last_key. Pass -1 as last_key
 * if this is the first key for an MR or MW.
 */
u8 rxe_get_next_key(u32 last_key)
{
	u8 key;

	do {
		get_random_bytes(&key, 1);
	} while (key == last_key);

	return key;
}

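/* Check that the byte range [iova, iova + length) falls entirely
 * within the registered region. DMA MRs cover the whole address
 * space, so any range is accepted for them.
 */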
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
{
	switch (mr->ibmr.type) {
	case IB_MR_TYPE_DMA:
		return 0;

	case IB_MR_TYPE_USER:
	case IB_MR_TYPE_MEM_REG:
		if (iova < mr->ibmr.iova ||
		    iova + length > mr->ibmr.iova + mr->ibmr.length) {
			rxe_dbg_mr(mr, "iova/length out of range\n");
			return -EINVAL;
		}
		return 0;

	default:
		rxe_dbg_mr(mr, "mr type not supported\n");
		return -EINVAL;
	}
}

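/* Initialize the common fields of an MR. The key is built from the
 * pool index in the upper 24 bits and a random 8 bit variant in the
 * low byte.
 */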
void rxe_mr_init(int access, struct rxe_mr *mr)
{
	u32 key = mr->elem.index << 8 | rxe_get_next_key(-1);

	/* set ibmr->l/rkey and also copy into private l/rkey.
	 * For user MRs these will always be the same; in cases
	 * where the caller 'owns' the key portion they may differ
	 * until the REG_MR WQE is executed.
	 */
	mr->lkey = mr->ibmr.lkey = key;
	mr->rkey = mr->ibmr.rkey = key;

	mr->access = access;
	mr->ibmr.page_size = PAGE_SIZE;
	mr->page_mask = PAGE_MASK;
	mr->page_shift = PAGE_SHIFT;
	mr->state = RXE_MR_STATE_INVALID;
}

void rxe_mr_init_dma(int access, struct rxe_mr *mr)
{
	rxe_mr_init(access, mr);

	mr->state = RXE_MR_STATE_VALID;
	mr->ibmr.type = IB_MR_TYPE_DMA;
}

/*
 * Convert iova to a page_info index. The page_info array stores pages
 * of size PAGE_SIZE, but MRs can have different page sizes. This
 * function handles the conversion for both cases:
 *
 * 1. mr->page_size > PAGE_SIZE:
 *    The MR's iova may not be aligned to mr->page_size. We use the
 *    aligned base (iova & page_mask) as the reference, then calculate
 *    which PAGE_SIZE sub-page the iova falls into.
 *
 * 2. mr->page_size <= PAGE_SIZE:
 *    Use simple shift arithmetic, since each page_info entry
 *    corresponds to exactly one MR page.
 */
static unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
{
	unsigned long idx;

	if (mr_page_size(mr) > PAGE_SIZE)
		idx = (iova - (mr->ibmr.iova & mr->page_mask)) >> PAGE_SHIFT;
	else
		idx = (iova >> mr->page_shift) -
		      (mr->ibmr.iova >> mr->page_shift);

	WARN_ON(idx >= mr->nbuf);
	return idx;
}

/*
 * Convert iova to the offset within its page_info entry.
 *
 * For mr_page_size > PAGE_SIZE, the offset is within the system page.
 * For mr_page_size <= PAGE_SIZE, the offset is within the MR page size.
 */
static unsigned long rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova)
{
	if (mr_page_size(mr) > PAGE_SIZE)
		return iova & (PAGE_SIZE - 1);
	else
		return iova & (mr_page_size(mr) - 1);
}

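/* Report whether a page lies in a persistent-memory region, which is
 * required for MRs registered with IB_ACCESS_FLUSH_PERSISTENT.
 */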
static bool is_pmem_page(struct page *pg)
{
	unsigned long paddr = page_to_phys(pg);

	return REGION_INTERSECTS ==
	       region_intersects(paddr, PAGE_SIZE, IORESOURCE_MEM,
				 IORES_DESC_PERSISTENT_MEMORY);
}

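/* Record each PAGE_SIZE page of a pinned umem in the MR's page_info
 * array, rejecting non-pmem pages when persistent access is requested.
 */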
static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
{
	struct sg_page_iter sg_iter;
	struct page *page;
	bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);

	WARN_ON(mr_page_size(mr) != PAGE_SIZE);

	__sg_page_iter_start(&sg_iter, sgt->sgl, sgt->orig_nents, 0);
	while (__sg_page_iter_next(&sg_iter)) {
		page = sg_page_iter_page(&sg_iter);

		if (persistent && !is_pmem_page(page)) {
			rxe_dbg_mr(mr, "Page can't be persistent\n");
			return -EINVAL;
		}

		mr->page_info[mr->nbuf].page = page;
		mr->page_info[mr->nbuf].offset = 0;
		mr->nbuf++;
	}

	return 0;
}

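/* Allocate the page_info array. The bare __alloc_mr_page_info() only
 * sizes the array; alloc_mr_page_info() additionally records num_buf
 * for the initial allocation.
 */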
static int __alloc_mr_page_info(struct rxe_mr *mr, int num_pages)
{
	mr->page_info = kcalloc(num_pages, sizeof(*mr->page_info),
				GFP_KERNEL);
	if (!mr->page_info)
		return -ENOMEM;

	mr->max_allowed_buf = num_pages;
	mr->nbuf = 0;

	return 0;
}

static int alloc_mr_page_info(struct rxe_mr *mr, int num_pages)
{
	int ret;

	WARN_ON(mr->num_buf);
	ret = __alloc_mr_page_info(mr, num_pages);
	if (ret)
		return ret;

	mr->num_buf = num_pages;

	return 0;
}

static void free_mr_page_info(struct rxe_mr *mr)
{
	if (!mr->page_info)
		return;

	kfree(mr->page_info);
	mr->page_info = NULL;
}

int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
		     int access, struct rxe_mr *mr)
{
	struct ib_umem *umem;
	int err;

	rxe_mr_init(access, mr);

	umem = ib_umem_get(&rxe->ib_dev, start, length, access);
	if (IS_ERR(umem)) {
		rxe_dbg_mr(mr, "Unable to pin memory region err = %d\n",
			   (int)PTR_ERR(umem));
		return PTR_ERR(umem);
	}

	err = alloc_mr_page_info(mr, ib_umem_num_pages(umem));
	if (err)
		goto err_release_umem;

	err = rxe_mr_fill_pages_from_sgt(mr, &umem->sgt_append.sgt);
	if (err)
		goto err_free_page_info;

	mr->umem = umem;
	mr->ibmr.type = IB_MR_TYPE_USER;
	mr->state = RXE_MR_STATE_VALID;

	return 0;

err_free_page_info:
	free_mr_page_info(mr);
err_release_umem:
	ib_umem_release(umem);
	return err;
}

int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
{
	int err;

	/* always allow remote access for FMRs */
	rxe_mr_init(RXE_ACCESS_REMOTE, mr);

	err = alloc_mr_page_info(mr, max_pages);
	if (err)
		return err;

	mr->state = RXE_MR_STATE_FREE;
	mr->ibmr.type = IB_MR_TYPE_MEM_REG;

	return 0;
}

/*
 * I) MRs with page_size >= PAGE_SIZE:
 *    Each call to rxe_set_page() represents one mr->page_size region,
 *    which is split into (mr->page_size >> PAGE_SHIFT) PAGE_SIZE
 *    sub-pages stored in page_info; the offset is always 0.
 *
 * II) MRs with page_size < PAGE_SIZE:
 *    Each call represents one MR page; save its system page and the
 *    MR page's offset within that system page in page_info.
 */
static int rxe_set_page(struct ib_mr *ibmr, u64 dma_addr)
{
	struct rxe_mr *mr = to_rmr(ibmr);
	bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
	u32 i, pages_per_mr = mr_page_size(mr) >> PAGE_SHIFT;

	pages_per_mr = max_t(u32, pages_per_mr, 1);

	for (i = 0; i < pages_per_mr; i++) {
		u64 addr = dma_addr + i * PAGE_SIZE;
		struct page *sub_page = ib_virt_dma_to_page(addr);

		if (unlikely(mr->nbuf >= mr->max_allowed_buf))
			return -ENOMEM;

		if (persistent && !is_pmem_page(sub_page)) {
			rxe_dbg_mr(mr, "Page cannot be persistent\n");
			return -EINVAL;
		}

		mr->page_info[mr->nbuf].page = sub_page;
		mr->page_info[mr->nbuf].offset = addr & (PAGE_SIZE - 1);
		mr->nbuf++;
	}

	return 0;
}

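/* Driver hook for ib_map_mr_sg(). The ib core stores the requested
 * page size in ibmr->page_size before calling, so mr_page_size() is
 * valid here.
 */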
int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sgl,
		  int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mr *mr = to_rmr(ibmr);
	unsigned int page_size = mr_page_size(mr);

	/*
	 * Ensure page_size and PAGE_SIZE are compatible for mapping.
	 * We require one to be a multiple of the other for correct
	 * iova-to-page conversion.
	 */
	if (!IS_ALIGNED(page_size, PAGE_SIZE) &&
	    !IS_ALIGNED(PAGE_SIZE, page_size)) {
		rxe_dbg_mr(mr, "MR page size %u must be compatible with PAGE_SIZE %lu\n",
			   page_size, PAGE_SIZE);
		return -EINVAL;
	}

	if (mr_page_size(mr) > PAGE_SIZE) {
		/* resize page_info if needed */
		u32 map_mr_pages = (page_size >> PAGE_SHIFT) * mr->num_buf;

		if (map_mr_pages > mr->max_allowed_buf) {
			rxe_dbg_mr(mr, "growing page_info from %u to %u pages\n",
				   mr->max_allowed_buf, map_mr_pages);
			free_mr_page_info(mr);
			if (__alloc_mr_page_info(mr, map_mr_pages))
				return -ENOMEM;
		}
	}

	mr->nbuf = 0;
	mr->page_shift = ilog2(page_size);
	mr->page_mask = ~((u64)page_size - 1);

	return ib_sg_to_pages(ibmr, sgl, sg_nents, sg_offset, rxe_set_page);
}

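/* Copy between a kernel buffer and MR memory through the page_info
 * array, mapping one page at a time with kmap_local_page().
 */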
static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
			      unsigned int length, enum rxe_mr_copy_dir dir)
{
	unsigned int bytes;
	u8 *va;

	while (length) {
		unsigned long index = rxe_mr_iova_to_index(mr, iova);
		struct rxe_mr_page *info = &mr->page_info[index];
		unsigned int page_offset = rxe_mr_iova_to_page_offset(mr, iova);

		if (!info->page)
			return -EFAULT;

		page_offset += info->offset;
		bytes = min_t(unsigned int, length, PAGE_SIZE - page_offset);
		va = kmap_local_page(info->page);

		if (dir == RXE_FROM_MR_OBJ)
			memcpy(addr, va + page_offset, bytes);
		else
			memcpy(va + page_offset, addr, bytes);
		kunmap_local(va);

		addr += bytes;
		iova += bytes;
		length -= bytes;
	}

	return 0;
}

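/* Copy to or from a DMA MR. For software RDMA devices the dma_addr
 * is a kernel virtual address, so each page is recovered with
 * ib_virt_dma_to_page().
 */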
static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 dma_addr, void *addr,
			    unsigned int length, enum rxe_mr_copy_dir dir)
{
	unsigned int page_offset = dma_addr & (PAGE_SIZE - 1);
	unsigned int bytes;
	struct page *page;
	u8 *va;

	while (length) {
		page = ib_virt_dma_to_page(dma_addr);
		bytes = min_t(unsigned int, length,
			      PAGE_SIZE - page_offset);
		va = kmap_local_page(page);

		if (dir == RXE_TO_MR_OBJ)
			memcpy(va + page_offset, addr, bytes);
		else
			memcpy(addr, va + page_offset, bytes);

		kunmap_local(va);
		page_offset = 0;
		dma_addr += bytes;
		addr += bytes;
		length -= bytes;
	}
}

int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
		unsigned int length, enum rxe_mr_copy_dir dir)
{
	int err;

	if (length == 0)
		return 0;

	if (WARN_ON(!mr))
		return -EINVAL;

	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
		rxe_mr_copy_dma(mr, iova, addr, length, dir);
		return 0;
	}

	err = mr_check_range(mr, iova, length);
	if (unlikely(err)) {
		rxe_dbg_mr(mr, "iova out of range\n");
		return err;
	}

	if (is_odp_mr(mr))
		return rxe_odp_mr_copy(mr, iova, addr, length, dir);
	else
		return rxe_mr_copy_xarray(mr, iova, addr, length, dir);
}

/* copy data in or out of a wqe, i.e. sg list
 * under the control of a dma descriptor
 */
int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
	      void *addr, int length, enum rxe_mr_copy_dir dir)
{
	int bytes;
	struct rxe_sge *sge = &dma->sge[dma->cur_sge];
	int offset = dma->sge_offset;
	int resid = dma->resid;
	struct rxe_mr *mr = NULL;
	u64 iova;
	int err;

	if (length == 0)
		return 0;

	if (length > resid) {
		err = -EINVAL;
		goto err_put;
	}

	if (sge->length && (offset < sge->length)) {
		mr = lookup_mr(pd, access, sge->lkey, RXE_LOOKUP_LOCAL);
		if (!mr) {
			err = -EINVAL;
			goto err_out;
		}
	}

	while (length > 0) {
		bytes = length;

		if (offset >= sge->length) {
			if (mr) {
				rxe_put(mr);
				mr = NULL;
			}
			sge++;
			dma->cur_sge++;
			offset = 0;

			if (dma->cur_sge >= dma->num_sge) {
				err = -ENOSPC;
				goto err_put;
			}

			if (sge->length) {
				mr = lookup_mr(pd, access, sge->lkey,
					       RXE_LOOKUP_LOCAL);
				if (!mr) {
					err = -EINVAL;
					goto err_out;
				}
			} else {
				continue;
			}
		}

		if (bytes > sge->length - offset)
			bytes = sge->length - offset;

		if (bytes > 0) {
			iova = sge->addr + offset;
			err = rxe_mr_copy(mr, iova, addr, bytes, dir);
			if (err)
				goto err_put;

			offset += bytes;
			resid -= bytes;
			length -= bytes;
			addr += bytes;
		}
	}

	dma->sge_offset = offset;
	dma->resid = resid;

	if (mr)
		rxe_put(mr);

	return 0;

err_put:
	if (mr)
		rxe_put(mr);
err_out:
	return err;
}

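/* Write back the CPU cache for an iova range of a pmem-backed MR so
 * that the data is durable, using arch_wb_cache_pmem() page by page.
 */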
static int rxe_mr_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
				  unsigned int length)
{
	unsigned int bytes;
	int err;
	u8 *va;

	err = mr_check_range(mr, iova, length);
	if (err)
		return err;

	while (length > 0) {
		unsigned long index = rxe_mr_iova_to_index(mr, iova);
		struct rxe_mr_page *info = &mr->page_info[index];
		unsigned int page_offset = rxe_mr_iova_to_page_offset(mr, iova);

		if (!info->page)
			return -EFAULT;

		page_offset += info->offset;
		bytes = min_t(unsigned int, length, PAGE_SIZE - page_offset);

		va = kmap_local_page(info->page);
		arch_wb_cache_pmem(va + page_offset, bytes);
		kunmap_local(va);

		length -= bytes;
		iova += bytes;
	}

	return 0;
}

int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 start, unsigned int length)
{
	int err;

	/* mr must be valid even if length is zero */
	if (WARN_ON(!mr))
		return -EINVAL;

	if (length == 0)
		return 0;

	if (mr->ibmr.type == IB_MR_TYPE_DMA)
		return -EFAULT;

	if (is_odp_mr(mr))
		err = rxe_odp_flush_pmem_iova(mr, start, length);
	else
		err = rxe_mr_flush_pmem_iova(mr, start, length);

	return err;
}

/* Guarantee atomicity of atomic operations at the machine level. */
DEFINE_SPINLOCK(atomic_ops_lock);

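/* Execute an 8-byte compare-and-swap or fetch-and-add at iova on
 * behalf of the responder. The target must be naturally aligned;
 * atomicity is provided by atomic_ops_lock and the original value
 * is returned through orig_val.
 */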
enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
				     u64 compare, u64 swap_add, u64 *orig_val)
{
	unsigned int page_offset;
	struct page *page;
	u64 value;
	u64 *va;

	if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
		rxe_dbg_mr(mr, "mr not in valid state\n");
		return RESPST_ERR_RKEY_VIOLATION;
	}

	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
		page_offset = iova & (PAGE_SIZE - 1);
		page = ib_virt_dma_to_page(iova);
	} else {
		unsigned long index;
		int err;
		struct rxe_mr_page *info;

		err = mr_check_range(mr, iova, sizeof(value));
		if (err) {
			rxe_dbg_mr(mr, "iova out of range\n");
			return RESPST_ERR_RKEY_VIOLATION;
		}
		page_offset = rxe_mr_iova_to_page_offset(mr, iova);
		index = rxe_mr_iova_to_index(mr, iova);
		info = &mr->page_info[index];
		if (!info->page)
			return RESPST_ERR_RKEY_VIOLATION;

		page_offset += info->offset;
		page = info->page;
	}

	if (unlikely(page_offset & 0x7)) {
		rxe_dbg_mr(mr, "iova not aligned\n");
		return RESPST_ERR_MISALIGNED_ATOMIC;
	}

	va = kmap_local_page(page);

	spin_lock_bh(&atomic_ops_lock);
	value = *orig_val = va[page_offset >> 3];

	if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
		if (value == compare)
			va[page_offset >> 3] = swap_add;
	} else {
		value += swap_add;
		va[page_offset >> 3] = value;
	}
	spin_unlock_bh(&atomic_ops_lock);

	kunmap_local(va);

	return RESPST_NONE;
}

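/* Execute an 8-byte atomic write at iova on behalf of the responder.
 * A single aligned 64-bit store via smp_store_release() orders the
 * write after all prior memory operations.
 */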
enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
{
	unsigned int page_offset;
	struct page *page;
	u64 *va;

	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
		page_offset = iova & (PAGE_SIZE - 1);
		page = ib_virt_dma_to_page(iova);
	} else {
		unsigned long index;
		int err;
		struct rxe_mr_page *info;

		/* See IBA oA19-28 */
		err = mr_check_range(mr, iova, sizeof(value));
		if (unlikely(err)) {
			rxe_dbg_mr(mr, "iova out of range\n");
			return RESPST_ERR_RKEY_VIOLATION;
		}
		page_offset = rxe_mr_iova_to_page_offset(mr, iova);
		index = rxe_mr_iova_to_index(mr, iova);
		info = &mr->page_info[index];
		if (!info->page)
			return RESPST_ERR_RKEY_VIOLATION;

		page_offset += info->offset;
		page = info->page;
	}

	/* See IBA A19.4.2 */
	if (unlikely(page_offset & 0x7)) {
		rxe_dbg_mr(mr, "misaligned address\n");
		return RESPST_ERR_MISALIGNED_ATOMIC;
	}

	va = kmap_local_page(page);
	/* Do atomic write after all prior operations have completed */
	smp_store_release(&va[page_offset >> 3], value);
	kunmap_local(va);

	return RESPST_NONE;
}

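/* Skip forward 'length' bytes in the WQE's sg list without copying,
 * updating cur_sge, sge_offset and resid to match.
 */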
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{
	struct rxe_sge *sge = &dma->sge[dma->cur_sge];
	int offset = dma->sge_offset;
	int resid = dma->resid;

	while (length) {
		unsigned int bytes;

		if (offset >= sge->length) {
			sge++;
			dma->cur_sge++;
			offset = 0;
			if (dma->cur_sge >= dma->num_sge)
				return -ENOSPC;
		}

		bytes = length;

		if (bytes > sge->length - offset)
			bytes = sge->length - offset;

		offset += bytes;
		resid -= bytes;
		length -= bytes;
	}

	dma->sge_offset = offset;
	dma->resid = resid;

	return 0;
}

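/* Look up an MR by lkey or rkey and take a reference on it. The MR
 * must belong to pd, grant the requested access and be in the valid
 * state; the caller must drop the reference with rxe_put().
 */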
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
			 enum rxe_mr_lookup_type type)
{
	struct rxe_mr *mr;
	struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
	int index = key >> 8;

	mr = rxe_pool_get_index(&rxe->mr_pool, index);
	if (!mr)
		return NULL;

	if (unlikely((type == RXE_LOOKUP_LOCAL && mr->lkey != key) ||
		     (type == RXE_LOOKUP_REMOTE && mr->rkey != key) ||
		     mr_pd(mr) != pd || ((access & mr->access) != access) ||
		     mr->state != RXE_MR_STATE_VALID)) {
		rxe_put(mr);
		mr = NULL;
	}

	return mr;
}

int rxe_invalidate_mr(struct rxe_qp *qp, u32 key)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_mr *mr;
	int remote;
	int ret;

	mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8);
	if (!mr) {
		rxe_dbg_qp(qp, "No MR for key %#x\n", key);
		ret = -EINVAL;
		goto err;
	}

	remote = mr->access & RXE_ACCESS_REMOTE;
	if (remote ? (key != mr->rkey) : (key != mr->lkey)) {
		rxe_dbg_mr(mr, "wr key (%#x) doesn't match mr key (%#x)\n",
			   key, (remote ? mr->rkey : mr->lkey));
		ret = -EINVAL;
		goto err_drop_ref;
	}

	if (atomic_read(&mr->num_mw) > 0) {
		rxe_dbg_mr(mr, "Attempt to invalidate an MR while bound to MWs\n");
		ret = -EINVAL;
		goto err_drop_ref;
	}

	if (unlikely(mr->ibmr.type != IB_MR_TYPE_MEM_REG)) {
		rxe_dbg_mr(mr, "Type (%d) is wrong\n", mr->ibmr.type);
		ret = -EINVAL;
		goto err_drop_ref;
	}

	mr->state = RXE_MR_STATE_FREE;
	ret = 0;

err_drop_ref:
	rxe_put(mr);
err:
	return ret;
}

/* A user can (re)register a fast MR by executing a REG_MR WQE.
 * The user is expected to hold a reference on the ib mr until the
 * WQE completes.
 * Once a fast MR is created this is the only way to change the
 * private keys. It is the user's responsibility to keep the ib mr
 * keys in sync with the rxe mr keys.
 */
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr);
	u32 key = wqe->wr.wr.reg.key;
	u32 access = wqe->wr.wr.reg.access;

	/* user can only register MR in free state */
	if (unlikely(mr->state != RXE_MR_STATE_FREE)) {
		rxe_dbg_mr(mr, "mr->lkey = 0x%x not free\n", mr->lkey);
		return -EINVAL;
	}

	/* user can only register mr with qp in same protection domain */
	if (unlikely(qp->ibqp.pd != mr->ibmr.pd)) {
		rxe_dbg_mr(mr, "qp->pd and mr->pd don't match\n");
		return -EINVAL;
	}

	/* user is only allowed to change key portion of l/rkey */
	if (unlikely((mr->lkey & ~0xff) != (key & ~0xff))) {
		rxe_dbg_mr(mr, "key = 0x%x has wrong index mr->lkey = 0x%x\n",
			   key, mr->lkey);
		return -EINVAL;
	}

	mr->access = access;
	mr->lkey = key;
	mr->rkey = key;
	mr->ibmr.iova = wqe->wr.wr.reg.mr->iova;
	mr->state = RXE_MR_STATE_VALID;

	return 0;
}

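/* Pool cleanup callback: drop the PD reference, release the pinned
 * umem (if any) and free the page_info array for non-DMA MRs.
 */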
void rxe_mr_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);

	rxe_put(mr_pd(mr));
	ib_umem_release(mr->umem);

	if (mr->ibmr.type != IB_MR_TYPE_DMA)
		free_mr_page_info(mr);
}