/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * This file contains code imported from the OFED rds source file rdma.c
 * Oracle elects to have and use the contents of rdma.c under and governed
 * by the OpenIB.org BSD license (see below for full license text). However,
 * the following notice accompanied the original version of this file:
 */

/*
 * Copyright (c) 2007 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <sys/ib/clients/of/rdma/ib_verbs.h>
#include <sys/ib/clients/of/rdma/ib_addr.h>
#include <sys/ib/clients/of/rdma/rdma_cm.h>

#include <sys/ib/clients/rdsv3/ib.h>
#include <sys/ib/clients/rdsv3/rdma.h>
#include <sys/ib/clients/rdsv3/rdsv3_debug.h>
#include <sys/containerof.h>

#define	DMA_TO_DEVICE	0
#define	DMA_FROM_DEVICE	1
#define	RB_CLEAR_NODE(nodep)	AVL_SETPARENT(nodep, nodep);

/*
 * XXX
 *  - build with sparse
 *  - should we limit the size of a mr region? let transport return failure?
 *  - should we detect duplicate keys on a socket? hmm.
 *  - an rdma is an mlock, apply rlimit?
 */

/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid. It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int. This comes
 * from being stored in the 'length' member of 'struct rdsv3_scatterlist'.
 */
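/*
 * Illustrative example (assuming 4 KB pages): a vec with addr 0x1ffe and
 * bytes 0x10 touches pages 0x1 and 0x2, so this returns 2, while a vec
 * whose addr + bytes wraps around 64 bits returns 0.
 */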
static unsigned int
rdsv3_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (uint64_t)UINT_MAX)) {
		return (0);
	}

	return (((vec->addr + vec->bytes + PAGESIZE - 1) >>
	    PAGESHIFT) - (vec->addr >> PAGESHIFT));
}

static struct rdsv3_mr *
rdsv3_mr_tree_walk(struct avl_tree *root, uint32_t key,
    struct rdsv3_mr *insert)
{
	struct rdsv3_mr *mr;
	avl_index_t where;

	mr = avl_find(root, &key, &where);
	if ((mr == NULL) && (insert != NULL)) {
		avl_insert(root, (void *)insert, where);
		atomic_inc_32(&insert->r_refcount);
		return (NULL);
	}

	return (mr);
}

/*
 * Destroy the transport-specific part of a MR.
 */
static void
rdsv3_destroy_mr(struct rdsv3_mr *mr)
{
	struct rdsv3_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	avl_node_t *np;

	RDSV3_DPRINTF5("rdsv3_destroy_mr",
	    "RDS: destroy mr key is %x refcnt %u",
	    mr->r_key, atomic_get(&mr->r_refcount));

	if (test_and_set_bit(RDSV3_MR_DEAD, &mr->r_state))
		return;

	mutex_enter(&rs->rs_rdma_lock);
	np = &mr->r_rb_node;
	if (AVL_XPARENT(np) != np)
		avl_remove(&rs->rs_rdma_keys, mr);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	mutex_exit(&rs->rs_rdma_lock);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

void
__rdsv3_put_mr_final(struct rdsv3_mr *mr)
{
	rdsv3_destroy_mr(mr);
	kmem_free(mr, sizeof (*mr));
}

/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void
rdsv3_rdma_drop_keys(struct rdsv3_sock *rs)
{
	struct rdsv3_mr *mr;
	struct avl_node *node;

	/* Release any MRs associated with this socket */
	mutex_enter(&rs->rs_rdma_lock);
	while ((node = avl_first(&rs->rs_rdma_keys))) {
		mr = __containerof(node, struct rdsv3_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		avl_remove(&rs->rs_rdma_keys, &mr->r_rb_node);
		RB_CLEAR_NODE(&mr->r_rb_node)
		mutex_exit(&rs->rs_rdma_lock);
		rdsv3_destroy_mr(mr);
		rdsv3_mr_put(mr);
		mutex_enter(&rs->rs_rdma_lock);
	}
	mutex_exit(&rs->rs_rdma_lock);

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}

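/*
 * Register the user memory described by args->vec with the transport and
 * insert the resulting MR into the socket's key tree. On success the
 * 64-bit cookie combining the new R_Key with the offset of vec.addr
 * within its page is returned through *cookie_ret and, if requested,
 * copied out to args->cookie_addr.
 */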
static int
__rdsv3_rdma_map(struct rdsv3_sock *rs, struct rds_get_mr_args *args,
    uint64_t *cookie_ret, struct rdsv3_mr **mr_ret)
{
	struct rdsv3_mr *mr = NULL, *found;
	void *trans_private;
	rds_rdma_cookie_t cookie;
	unsigned int nents = 0;
	int ret;

	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (!rs->rs_transport->get_mr) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	mr = kmem_zalloc(sizeof (struct rdsv3_mr), KM_NOSLEEP);
	if (!mr) {
		ret = -ENOMEM;
		goto out;
	}

	mr->r_refcount = 1;
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here.
	 */
	trans_private = rs->rs_transport->get_mr(&args->vec, nents, rs,
	    &mr->r_key);

	if (IS_ERR(trans_private)) {
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	/*
	 * The user may pass us an unaligned address, but we can only
	 * map page aligned regions. So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around.
	 */
	cookie = rdsv3_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGEMASK);
	if (cookie_ret)
		*cookie_ret = cookie;

	/*
	 * copy value of cookie to user address at args->cookie_addr
	 */
	if (args->cookie_addr) {
		ret = ddi_copyout((void *)&cookie,
		    (void *)((intptr_t)args->cookie_addr),
		    sizeof (rds_rdma_cookie_t), 0);
		if (ret != 0) {
			ret = -EFAULT;
			goto out;
		}
	}

	RDSV3_DPRINTF5("__rdsv3_rdma_map",
	    "RDS: get_mr mr 0x%p addr 0x%llx key 0x%x",
	    mr, args->vec.addr, mr->r_key);
	/*
	 * Inserting the new MR into the rbtree bumps its
	 * reference count.
	 */
	mutex_enter(&rs->rs_rdma_lock);
	found = rdsv3_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	mutex_exit(&rs->rs_rdma_lock);

	ASSERT(!(found && found != mr));

	if (mr_ret) {
		atomic_inc_32(&mr->r_refcount);
		*mr_ret = mr;
	}

	ret = 0;
out:
	if (mr)
		rdsv3_mr_put(mr);
	return (ret);
}

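/*
 * The handlers below service the memory registration socket options.
 * The option value reaches them already copied into kernel memory, so a
 * plain bcopy() suffices; the ddi_copyin() variant is kept under #else
 * for reference.
 */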
int
rdsv3_get_mr(struct rdsv3_sock *rs, const void *optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof (struct rds_get_mr_args))
		return (-EINVAL);

#if 1
	bcopy((struct rds_get_mr_args *)optval, &args,
	    sizeof (struct rds_get_mr_args));
#else
	if (ddi_copyin(optval, &args, optlen, 0))
		return (-EFAULT);
#endif

	return (__rdsv3_rdma_map(rs, &args, NULL, NULL));
}

int
rdsv3_get_mr_for_dest(struct rdsv3_sock *rs, const void *optval,
    int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof (struct rds_get_mr_for_dest_args))
		return (-EINVAL);

#if 1
	bcopy((struct rds_get_mr_for_dest_args *)optval, &args,
	    sizeof (struct rds_get_mr_for_dest_args));
#else
	if (ddi_copyin(optval, &args, optlen, 0))
		return (-EFAULT);
#endif

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 * and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return (__rdsv3_rdma_map(rs, &new_args, NULL, NULL));
}

/*
 * Free the MR indicated by the given R_Key
 */
int
rdsv3_free_mr(struct rdsv3_sock *rs, const void *optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rdsv3_mr *mr;

	if (optlen != sizeof (struct rds_free_mr_args))
		return (-EINVAL);

#if 1
	bcopy((struct rds_free_mr_args *)optval, &args,
	    sizeof (struct rds_free_mr_args));
#else
	if (ddi_copyin((struct rds_free_mr_args *)optval, &args,
	    sizeof (struct rds_free_mr_args), 0))
		return (-EFAULT);
#endif

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return (-EINVAL);
		rs->rs_transport->flush_mrs();
		return (0);
	}

	/*
	 * Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rdsv3_rdma_unuse.
	 */
	mutex_enter(&rs->rs_rdma_lock);
	mr = rdsv3_mr_tree_walk(&rs->rs_rdma_keys,
	    rdsv3_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		avl_remove(&rs->rs_rdma_keys, &mr->r_rb_node);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	mutex_exit(&rs->rs_rdma_lock);

	if (!mr)
		return (-EINVAL);

	/*
	 * call rdsv3_destroy_mr() ourselves so that we're sure it's done
	 * by time we return. If we let rdsv3_mr_put() do it it might not
	 * happen until someone else drops their ref.
	 */
	rdsv3_destroy_mr(mr);
	rdsv3_mr_put(mr);
	return (0);
}

/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics
 */
void
rdsv3_rdma_unuse(struct rdsv3_sock *rs, uint32_t r_key, int force)
{
	struct rdsv3_mr *mr;
	int zot_me = 0;

	RDSV3_DPRINTF4("rdsv3_rdma_unuse", "Enter rkey: 0x%x", r_key);

	mutex_enter(&rs->rs_rdma_lock);
	mr = rdsv3_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr) {
		RDSV3_DPRINTF4("rdsv3_rdma_unuse",
		    "rdsv3: trying to unuse MR with unknown r_key %u!", r_key);
		mutex_exit(&rs->rs_rdma_lock);
		return;
	}

	if (mr->r_use_once || force) {
		avl_remove(&rs->rs_rdma_keys, &mr->r_rb_node);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	} else {
		atomic_inc_32(&mr->r_refcount);
	}
	mutex_exit(&rs->rs_rdma_lock);

	/*
	 * May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was a RDMA READ,
	 * but at this point we can't tell.
	 */
	if (mr->r_trans->sync_mr)
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

	/*
	 * If the MR was marked as invalidate, this will
	 * trigger an async flush.
	 */
	if (zot_me)
		rdsv3_destroy_mr(mr);
	rdsv3_mr_put(mr);
	RDSV3_DPRINTF4("rdsv3_rdma_unuse", "Return");
}

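/*
 * Release the RDMA state attached to a message: unlock the user pages
 * pinned by rdsv3_rdma_prepare() and free the notifier and the op itself.
 */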
void
rdsv3_rdma_free_op(struct rdsv3_rdma_op *ro)
{
	unsigned int i;

	/* deallocate RDMA resources on rdsv3_message */
	for (i = 0; i < ro->r_nents; i++) {
		ddi_umem_unlock(ro->r_rdma_sg[i].umem_cookie);
	}

	if (ro->r_notifier)
		kmem_free(ro->r_notifier, sizeof (*ro->r_notifier));
	kmem_free(ro, sizeof (*ro));
}

/*
 * args is a pointer to an in-kernel copy in the sendmsg cmsg.
 */
static struct rdsv3_rdma_op *
rdsv3_rdma_prepare(struct rdsv3_sock *rs, struct rds_rdma_args *args)
{
	struct rds_iovec vec;
	struct rdsv3_rdma_op *op = NULL;
	unsigned int nr_bytes;
	struct rds_iovec *local_vec;
	unsigned int nr;
	unsigned int i;
	ddi_umem_cookie_t umem_cookie;
	size_t umem_len;
	caddr_t umem_addr;
	int ret;

	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (args->nr_local > (uint64_t)UINT_MAX) {
		ret = -EMSGSIZE;
		goto out;
	}

	op = kmem_zalloc(offsetof(struct rdsv3_rdma_op,
	    r_rdma_sg[args->nr_local]), KM_NOSLEEP);
	if (op == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	op->r_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->r_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->r_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->r_recverr = rs->rs_recverr;

	if (op->r_notify || op->r_recverr) {
		/*
		 * We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->r_notifier = kmem_alloc(sizeof (struct rdsv3_notifier),
		    KM_NOSLEEP);
		if (!op->r_notifier) {
			ret = -ENOMEM;
			goto out;
		}
		op->r_notifier->n_user_token = args->user_token;
		op->r_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	/*
	 * The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR)
	 * FIXME: We may want to move this into ib_rdma.c
	 */
	op->r_key = rdsv3_rdma_cookie_key(args->cookie);
	op->r_remote_addr = args->remote_vec.addr +
	    rdsv3_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	RDSV3_DPRINTF5("rdsv3_rdma_prepare",
	    "RDS: rdma prepare nr_local %llu rva %llx rkey %x",
	    (unsigned long long)args->nr_local,
	    (unsigned long long)args->remote_vec.addr,
	    op->r_key);

	local_vec = (struct rds_iovec *)(unsigned long) args->local_vec_addr;

	/* pin the scatter list of user buffers */
	for (i = 0; i < args->nr_local; i++) {
		if (ddi_copyin(&local_vec[i], &vec,
		    sizeof (struct rds_iovec), 0)) {
			ret = -EFAULT;
			goto out;
		}

		nr = rdsv3_pages_in_vec(&vec);
		if (nr == 0) {
			RDSV3_DPRINTF2("rdsv3_rdma_prepare",
			    "rdsv3_pages_in_vec returned 0");
			ret = -EINVAL;
			goto out;
		}

		rs->rs_user_addr = vec.addr;
		rs->rs_user_bytes = vec.bytes;

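		/*
		 * Example (illustrative, assuming 4 KB pages): for
		 * vec.addr 0x10fff0 and vec.bytes 0x20, the lock below
		 * covers umem_addr 0x10f000 and umem_len 0x2000, i.e.
		 * both pages straddled by the unaligned transfer.
		 */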
		/* pin user memory pages */
		umem_len = ptob(btopr(vec.bytes +
		    ((uintptr_t)vec.addr & PAGEOFFSET)));
		umem_addr = (caddr_t)((uintptr_t)vec.addr & ~PAGEOFFSET);
		ret = umem_lockmemory(umem_addr, umem_len,
		    DDI_UMEMLOCK_WRITE | DDI_UMEMLOCK_READ,
		    &umem_cookie, NULL, NULL);
		if (ret != 0) {
			RDSV3_DPRINTF2("rdsv3_rdma_prepare",
			    "umem_lockmemory() returned %d", ret);
			ret = -EFAULT;
			goto out;
		}
		op->r_rdma_sg[i].umem_cookie = umem_cookie;
		op->r_rdma_sg[i].iovec = vec;
		nr_bytes += vec.bytes;

		RDSV3_DPRINTF5("rdsv3_rdma_prepare",
		    "RDS: nr_bytes %u nr %u vec.bytes %llu vec.addr %llx",
		    nr_bytes, nr, vec.bytes, vec.addr);
	}
	op->r_nents = i;

	if (nr_bytes > args->remote_vec.bytes) {
		RDSV3_DPRINTF2("rdsv3_rdma_prepare",
		    "RDS nr_bytes %u remote_bytes %u do not match",
		    nr_bytes, (unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out;
	}
	op->r_bytes = nr_bytes;

	ret = 0;
out:
	if (ret) {
		if (op)
			rdsv3_rdma_free_op(op);
		op = ERR_PTR(ret);
	}
	return (op);
}

#define	CEIL(x, y)	(((x) + (y) - 1) / (y))

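/*
 * CMSG_DATA() need not be 64-bit aligned, so each cmsg handler below
 * copies the payload into a local uint64_t buffer, sized in uint64_t
 * units with CEIL(), before interpreting it as a structure.
 */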
/*
 * The application asks for a RDMA transfer.
 * Extract all arguments and set up the rdma_op
 */
int
rdsv3_cmsg_rdma_args(struct rdsv3_sock *rs, struct rdsv3_message *rm,
    struct cmsghdr *cmsg)
{
	struct rdsv3_rdma_op *op;
	/* uint64_t alignment on the buffer */
	uint64_t buf[CEIL(CMSG_LEN(sizeof (struct rds_rdma_args)),
	    sizeof (uint64_t))];

	if (cmsg->cmsg_len != CMSG_LEN(sizeof (struct rds_rdma_args)) ||
	    rm->m_rdma_op != NULL)
		return (-EINVAL);

	ASSERT(sizeof (buf) >= cmsg->cmsg_len && ((uintptr_t)buf & 0x7) == 0);

	bcopy(CMSG_DATA(cmsg), (char *)buf, cmsg->cmsg_len);
	op = rdsv3_rdma_prepare(rs, (struct rds_rdma_args *)buf);

	if (IS_ERR(op))
		return (PTR_ERR(op));
	rdsv3_stats_inc(s_send_rdma);
	rm->m_rdma_op = op;
	return (0);
}

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote
 */
int
rdsv3_cmsg_rdma_dest(struct rdsv3_sock *rs, struct rdsv3_message *rm,
    struct cmsghdr *cmsg)
{
	struct rdsv3_mr *mr;
	uint32_t r_key;
	int err = 0;

	if (cmsg->cmsg_len != CMSG_LEN(sizeof (rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return (-EINVAL);

	(void) memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg),
	    sizeof (rm->m_rdma_cookie));

	/*
	 * We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rdsv3_rdma_cookie_key(rm->m_rdma_cookie);

	mutex_enter(&rs->rs_rdma_lock);
	mr = rdsv3_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr)
		err = -EINVAL;	/* invalid r_key */
	else
		atomic_inc_32(&mr->r_refcount);
	mutex_exit(&rs->rs_rdma_lock);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
		rm->m_rdma_mr = mr;
	}
	return (err);
}

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int
rdsv3_cmsg_rdma_map(struct rdsv3_sock *rs, struct rdsv3_message *rm,
    struct cmsghdr *cmsg)
{
	/* uint64_t alignment on the buffer */
	uint64_t buf[CEIL(CMSG_LEN(sizeof (struct rds_get_mr_args)),
	    sizeof (uint64_t))];
	int status;

	if (cmsg->cmsg_len != CMSG_LEN(sizeof (struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return (-EINVAL);

	ASSERT(sizeof (buf) >= cmsg->cmsg_len && ((uintptr_t)buf & 0x7) == 0);

	bcopy(CMSG_DATA(cmsg), (char *)buf, cmsg->cmsg_len);
	status = __rdsv3_rdma_map(rs, (struct rds_get_mr_args *)buf,
	    &rm->m_rdma_cookie, &rm->m_rdma_mr);

	return (status);
}
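/*
 * Illustrative flow (a sketch of typical application-side usage, not an
 * interface definition): the side whose buffer is the RDMA target
 * registers it (rdsv3_get_mr() / rdsv3_cmsg_rdma_map() above) and hands
 * the resulting rds_rdma_cookie_t to its peer, typically via
 * rdsv3_cmsg_rdma_dest(). The peer then attaches a struct rds_rdma_args
 * carrying that cookie to a sendmsg() call, handled by
 * rdsv3_cmsg_rdma_args(), to drive the transfer. The registration is
 * dropped explicitly through rdsv3_free_mr() or, when RDS_RDMA_USE_ONCE
 * was set, torn down in rdsv3_rdma_unuse() once the peer reports that the
 * MR was used.
 */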