/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/types.h>
#include <sys/stream.h>
#include <sys/dlpi.h>
#include <sys/stropts.h>
#include <sys/strsun.h>
#include <sys/sysmacros.h>
#include <sys/strlog.h>
#include <sys/ddi.h>
#include <sys/cmn_err.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <sys/ethernet.h>
#include <inet/arp.h>
#include <inet/ip.h>
#include <inet/ip6.h>
#include <inet/ip_ire.h>
#include <inet/ip_if.h>
#include <inet/ip_ftable.h>

#include <sys/sunddi.h>
#include <sys/ksynch.h>

#include <sys/rds.h>
/* NOTE(review): <sys/socket.h>, <sys/sysmacros.h>, <inet/ip.h> and */
/* <net/if_types.h> are included twice in this file; harmless but tidy-able. */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/sysmacros.h>
#include <inet/common.h>
#include <inet/ip.h>
#include <net/if_types.h>

#include <sys/ib/clients/rdsv3/rdsv3.h>
#include <sys/ib/clients/rdsv3/rdma.h>
#include <sys/ib/clients/rdsv3/ib.h>
#include <sys/ib/clients/rdsv3/rdsv3_impl.h>
#include <sys/ib/clients/rdsv3/rdsv3_debug.h>

#include <sys/dls.h>
#include <sys/mac.h>
#include <sys/mac_client.h>
#include <sys/mac_provider.h>
#include <sys/mac_client_priv.h>

/* clock ticks per second; set once in rdsv3_create_task_workqueue() */
uint_t		rdsv3_one_sec_in_hz;
/* single-threaded taskq backing the RDS workqueue implementation below */
ddi_taskq_t	*rdsv3_taskq = NULL;
extern kmem_cache_t *rdsv3_alloc_cache;

extern unsigned int ip_ocsum(ushort_t *address, int halfword_count,
    unsigned int sum);

/*
 * Check if the IP interface named by `lifrp' is RDS-capable.
 *
 * An interface qualifies when it is of type IB, is the loopback
 * interface "lo0", or its underlying driver is registered with RDS
 * (rdsv3_if_lookup_by_name()).  Any ":<n>" logical-interface suffix
 * is stripped before the name is examined.
 */
boolean_t
rdsv3_capable_interface(struct lifreq *lifrp)
{
	char		ifname[LIFNAMSIZ];
	char		drv[MAXLINKNAMELEN];
	uint_t		ppa;
	char		*cp;

	RDSV3_DPRINTF4("rdsv3_capable_interface", "Enter");

	if (lifrp->lifr_type == IFT_IB)
		return (B_TRUE);

	/*
	 * Strip off the logical interface portion before getting
	 * intimate with the name.
	 */
	(void) strlcpy(ifname, lifrp->lifr_name, LIFNAMSIZ);
	if ((cp = strchr(ifname, ':')) != NULL)
		*cp = '\0';

	if (strcmp("lo0", ifname) == 0) {
		/*
		 * loopback is considered RDS-capable
		 */
		return (B_TRUE);
	}

	/* split "<drv><ppa>" and ask RDS whether it knows the driver */
	return (ddi_parse(ifname, drv, &ppa) == DDI_SUCCESS &&
	    rdsv3_if_lookup_by_name(drv));
}

/*
 * Gather the RDS-capable IP interfaces on this system using the
 * long-form (lifreq) ioctls on socket `so4'.
 *
 * On success *ipaddrs points to a kmem_alloc'ed array of struct lifreq
 * (one per RDS-capable, IFF_UP, non-anycast/nolocal/deprecated AF_INET
 * interface), *size is its byte length and *nifs the record count; each
 * record's ss_family is rewritten to AF_INET_OFFLOAD.  The caller owns
 * and must kmem_free(*ipaddrs, *size).  Returns 0 on success or the
 * failing ioctl's error.
 */
int
rdsv3_do_ip_ioctl(ksocket_t so4, void **ipaddrs, int *size, int *nifs)
{
	struct lifnum	lifn;
	struct lifconf	lifc;
	struct lifreq	*lp, *rlp, lifr;
	int		rval = 0;
	int		numifs;
	int		bufsize, rbufsize;
	void		*buf, *rbuf;
	int		i, j, n, rc;

	*ipaddrs = NULL;
	*size = 0;
	*nifs = 0;

	RDSV3_DPRINTF4("rdsv3_do_ip_ioctl", "Enter");

retry_count:
	/* snapshot the current number of interfaces */
	lifn.lifn_family = PF_UNSPEC;
	lifn.lifn_flags = LIFC_NOXMIT | LIFC_TEMPORARY | LIFC_ALLZONES;
	lifn.lifn_count = 0;
	rval = ksocket_ioctl(so4, SIOCGLIFNUM, (intptr_t)&lifn, &rval,
	    CRED());
	if (rval != 0) {
		RDSV3_DPRINTF2("rdsv3_do_ip_ioctl",
		    "ksocket_ioctl returned: %d", rval);
		return (rval);
	}

	numifs = lifn.lifn_count;
	if (numifs <= 0) {
		RDSV3_DPRINTF2("rdsv3_do_ip_ioctl", "No interfaces found");
		return (0);
	}

	/* allocate extra room in case more interfaces appear */
	numifs += 10;

	/* get the interface names and ip addresses */
	bufsize = numifs * sizeof (struct lifreq);
	buf = kmem_alloc(bufsize, KM_SLEEP);

	lifc.lifc_family = AF_UNSPEC;
	lifc.lifc_flags = LIFC_NOXMIT | LIFC_TEMPORARY | LIFC_ALLZONES;
	lifc.lifc_len = bufsize;
	lifc.lifc_buf = buf;
	rc = ksocket_ioctl(so4, SIOCGLIFCONF, (intptr_t)&lifc, &rval, CRED());
	if (rc != 0) {
		RDSV3_DPRINTF2("rdsv3_do_ip_ioctl", "SIOCGLIFCONF failed");
		kmem_free(buf, bufsize);
		return (rc);
	}
	/* if our extra room is used up, try again */
	if (bufsize <= lifc.lifc_len) {
		kmem_free(buf, bufsize);
		buf = NULL;
		goto retry_count;
	}
	/* calc actual number of ifconfs */
	n = lifc.lifc_len / sizeof (struct lifreq);

	/*
	 * Count the RDS interfaces
	 */
	for (i = 0, j = 0, lp = lifc.lifc_req; i < n; i++, lp++) {

		/*
		 * Copy as the SIOCGLIFFLAGS ioctl is destructive
		 */
		bcopy(lp, &lifr, sizeof (struct lifreq));
		/*
		 * fetch the flags using the socket of the correct family
		 */
		switch (lifr.lifr_addr.ss_family) {
		case AF_INET:
			rc = ksocket_ioctl(so4, SIOCGLIFFLAGS, (intptr_t)&lifr,
			    &rval, CRED());
			break;
		default:
			/* only AF_INET interfaces are considered */
			continue;
		}

		if (rc != 0)
			continue;

		/*
		 * If we got the flags, skip uninteresting
		 * interfaces based on flags
		 */
		if ((lifr.lifr_flags & IFF_UP) != IFF_UP)
			continue;
		if (lifr.lifr_flags &
		    (IFF_ANYCAST|IFF_NOLOCAL|IFF_DEPRECATED))
			continue;
		if (!rdsv3_capable_interface(&lifr))
			continue;
		j++;
	}

	if (j <= 0) {
		RDSV3_DPRINTF2("rdsv3_do_ip_ioctl", "No RDS interfaces");
		kmem_free(buf, bufsize);
		return (rval);
	}

	numifs = j;

	/* This is the buffer we pass back */
	rbufsize = numifs * sizeof (struct lifreq);
	rbuf = kmem_alloc(rbufsize, KM_SLEEP);
	rlp = (struct lifreq *)rbuf;

	/*
	 * Examine the array of interfaces and filter uninteresting ones
	 * (same criteria as the counting pass above).
	 */
	for (i = 0, lp = lifc.lifc_req; i < n; i++, lp++) {

		/*
		 * Copy the address as the SIOCGLIFFLAGS ioctl is destructive
		 */
		bcopy(lp, &lifr, sizeof (struct lifreq));
		/*
		 * fetch the flags using the socket of the correct family
		 */
		switch (lifr.lifr_addr.ss_family) {
		case AF_INET:
			rc = ksocket_ioctl(so4, SIOCGLIFFLAGS, (intptr_t)&lifr,
			    &rval, CRED());
			break;
		default:
			continue;
		}


		if (rc != 0) {
			RDSV3_DPRINTF2("rdsv3_do_ip_ioctl",
			    "ksocket_ioctl failed" " for %s", lifr.lifr_name);
			continue;
		}

		/*
		 * If we got the flags, skip uninteresting
		 * interfaces based on flags
		 */
		if ((lifr.lifr_flags & IFF_UP) != IFF_UP)
			continue;
		if (lifr.lifr_flags &
		    (IFF_ANYCAST|IFF_NOLOCAL|IFF_DEPRECATED))
			continue;
		if (!rdsv3_capable_interface(&lifr))
			continue;

		/* save the record */
		bcopy(lp, rlp, sizeof (struct lifreq));
		rlp->lifr_addr.ss_family = AF_INET_OFFLOAD;
		rlp++;
	}

	kmem_free(buf, bufsize);

	*ipaddrs = rbuf;
	*size = rbufsize;
	*nifs = numifs;

	RDSV3_DPRINTF4("rdsv3_do_ip_ioctl", "Return");

	return (rval);
}

/*
 * Check if the IP interface named by `ifrp' is RDS-capable.
 *
 * Short-form (ifreq) variant of rdsv3_capable_interface(): since ifreq
 * carries no lifr_type, IB interfaces are recognized by the "ibd" name
 * prefix instead.
 */
boolean_t
rdsv3_capable_interface_old(struct ifreq *ifrp)
{
	char		ifname[IFNAMSIZ];
	char		drv[MAXLINKNAMELEN];
	uint_t		ppa;
	char		*cp;

	RDSV3_DPRINTF4("rdsv3_capable_interface_old", "Enter");

	/*
	 * Strip off the logical interface portion before getting
	 * intimate with the name.
	 */
	(void) strlcpy(ifname, ifrp->ifr_name, IFNAMSIZ);
	if ((cp = strchr(ifname, ':')) != NULL)
		*cp = '\0';

	RDSV3_DPRINTF4("rdsv3_capable_interface_old", "ifname: %s", ifname);

	if ((strcmp("lo0", ifname) == 0) ||
	    (strncmp("ibd", ifname, 3) == 0)) {
		/*
		 * loopback and IB are considered RDS-capable
		 */
		return (B_TRUE);
	}

	return (ddi_parse(ifname, drv, &ppa) == DDI_SUCCESS &&
	    rdsv3_if_lookup_by_name(drv));
}

/*
 * Legacy (struct ifreq / SIOCGIF*) counterpart of rdsv3_do_ip_ioctl().
 * Same contract: on success the caller owns *ipaddrs (an array of
 * struct ifreq, length *size bytes, *nifs records) and must free it.
 */
int
rdsv3_do_ip_ioctl_old(ksocket_t so4, void **ipaddrs, int *size, int *nifs)
{
	uint_t		ifn;
	struct ifconf	ifc;
	struct ifreq	*lp, *rlp, ifr;
	int		rval = 0;
	int		numifs;
	int		bufsize, rbufsize;
	void		*buf, *rbuf;
	int		i, j, n, rc;

	*ipaddrs = NULL;
	*size = 0;
	*nifs = 0;

	RDSV3_DPRINTF4("rdsv3_do_ip_ioctl_old", "Enter");

retry_count:
	rval = ksocket_ioctl(so4, SIOCGIFNUM, (intptr_t)&ifn, &rval,
	    CRED());
	if (rval != 0) {
		RDSV3_DPRINTF2("rdsv3_do_ip_ioctl_old",
		    "ksocket_ioctl(SIOCGIFNUM) returned: %d", rval);
		return (rval);
	}

	numifs = ifn;
	if (numifs <= 0) {
		RDSV3_DPRINTF2("rdsv3_do_ip_ioctl_old", "No interfaces found");
		return (0);
	}

	/* allocate extra room in case more interfaces appear */
	numifs += 10;

	/* get the interface names and ip addresses */
	bufsize = numifs * sizeof (struct ifreq);
	buf = kmem_alloc(bufsize, KM_SLEEP);

	ifc.ifc_len = bufsize;
	ifc.ifc_buf = buf;
	rc = ksocket_ioctl(so4, SIOCGIFCONF, (intptr_t)&ifc, &rval, CRED());
	if (rc != 0) {
		/*
		 * NOTE(review): message says SIOCGLIFCONF but the ioctl
		 * issued above is SIOCGIFCONF — debug text is misleading.
		 */
		RDSV3_DPRINTF2("rdsv3_do_ip_ioctl_old",
		    "SIOCGLIFCONF failed: %d", rc);
		kmem_free(buf, bufsize);
		return (rc);
	}
	/* if our extra room is used up, try again */
	if (bufsize <= ifc.ifc_len) {
		kmem_free(buf, bufsize);
		buf = NULL;
		goto retry_count;
	}
	/* calc actual number of ifconfs */
	n = ifc.ifc_len / sizeof (struct ifreq);

	/*
	 * Count the RDS interfaces
	 */
	for (i = 0, j = 0, lp = ifc.ifc_req; i < n; i++, lp++) {

		/*
		 * Copy as the SIOCGIFFLAGS ioctl is destructive
		 */
		bcopy(lp, &ifr, sizeof (struct ifreq));
		/*
		 * fetch the flags using the socket of the correct family
		 */
		switch (ifr.ifr_addr.sa_family) {
		case AF_INET:
			rc = ksocket_ioctl(so4, SIOCGIFFLAGS, (intptr_t)&ifr,
			    &rval, CRED());
			break;
		default:
			continue;
		}

		if (rc != 0)
			continue;

		RDSV3_DPRINTF2("rdsv3_do_ip_ioctl_old",
		    "1. ifr_name: %s, flags: %d", ifr.ifr_name,
		    (ushort_t)ifr.ifr_flags);

		/*
		 * If we got the flags, skip uninteresting
		 * interfaces based on flags
		 */
		if ((((ushort_t)ifr.ifr_flags) & IFF_UP) != IFF_UP)
			continue;
		RDSV3_DPRINTF2("rdsv3_do_ip_ioctl_old",
		    "2. ifr_name: %s, flags: %d", ifr.ifr_name,
		    (ushort_t)ifr.ifr_flags);
		if (((ushort_t)ifr.ifr_flags) &
		    (IFF_ANYCAST|IFF_NOLOCAL|IFF_DEPRECATED))
			continue;
		RDSV3_DPRINTF2("rdsv3_do_ip_ioctl_old",
		    "3. ifr_name: %s, flags: %d", ifr.ifr_name,
		    (ushort_t)ifr.ifr_flags);
		if (!rdsv3_capable_interface_old(&ifr))
			continue;
		RDSV3_DPRINTF2("rdsv3_do_ip_ioctl_old",
		    "4. ifr_name: %s, flags: %d", ifr.ifr_name,
		    (ushort_t)ifr.ifr_flags);
		j++;
	}

	if (j <= 0) {
		RDSV3_DPRINTF2("rdsv3_do_ip_ioctl_old", "No RDS interfaces");
		kmem_free(buf, bufsize);
		return (rval);
	}

	numifs = j;

	/* This is the buffer we pass back */
	rbufsize = numifs * sizeof (struct ifreq);
	rbuf = kmem_alloc(rbufsize, KM_SLEEP);
	rlp = (struct ifreq *)rbuf;

	/*
	 * Examine the array of interfaces and filter uninteresting ones
	 */
	for (i = 0, lp = ifc.ifc_req; i < n; i++, lp++) {

		/*
		 * Copy the address as the SIOCGIFFLAGS ioctl is destructive
		 */
		bcopy(lp, &ifr, sizeof (struct ifreq));
		/*
		 * fetch the flags using the socket of the correct family
		 */
		switch (ifr.ifr_addr.sa_family) {
		case AF_INET:
			rc = ksocket_ioctl(so4, SIOCGIFFLAGS, (intptr_t)&ifr,
			    &rval, CRED());
			break;
		default:
			continue;
		}


		if (rc != 0) {
			RDSV3_DPRINTF2("rdsv3_do_ip_ioctl_old",
			    "ksocket_ioctl failed: %d for %s",
			    rc, ifr.ifr_name);
			continue;
		}

		/*
		 * If we got the flags, skip uninteresting
		 * interfaces based on flags
		 */
		if ((((ushort_t)ifr.ifr_flags) & IFF_UP) != IFF_UP)
			continue;
		if (((ushort_t)ifr.ifr_flags) &
		    (IFF_ANYCAST|IFF_NOLOCAL|IFF_DEPRECATED))
			continue;
		if (!rdsv3_capable_interface_old(&ifr))
			continue;

		/* save the record */
		bcopy(lp, rlp, sizeof (struct ifreq));
		rlp->ifr_addr.sa_family = AF_INET_OFFLOAD;
		rlp++;
	}

	kmem_free(buf, bufsize);

	*ipaddrs = rbuf;
	*size = rbufsize;
	*nifs = numifs;

	RDSV3_DPRINTF4("rdsv3_do_ip_ioctl_old", "Return");

	return (rval);
}

/*
 * Return B_TRUE if `addr' is a loopback address in the global zone's
 * IP stack.  Takes and releases a netstack hold around the lookup.
 */
boolean_t
rdsv3_isloopback(ipaddr_t addr)
{
	ip_stack_t *ipst;

	ipst = netstack_find_by_zoneid(GLOBAL_ZONEID)->netstack_ip;
	ASSERT(ipst != NULL);
	if (ip_type_v4(addr, ipst) != IRE_LOOPBACK) {
		netstack_rele(ipst->ips_netstack);
		return (B_FALSE);
	}
	netstack_rele(ipst->ips_netstack);
	return (B_TRUE);
}

/*
 * Work Queue Implementation
 *
 * A minimal Linux-workqueue lookalike layered on a single ddi_taskq.
 * wq_state (below) is protected by wq_lock and drives a small state
 * machine: IDLE (no dispatcher running), RUNNING (dispatcher active or
 * scheduled), FLUSHING (drain requested), EXITING (teardown).
 */

#define	RDSV3_WQ_THREAD_IDLE		0
#define	RDSV3_WQ_THREAD_RUNNING		1
#define	RDSV3_WQ_THREAD_FLUSHING	2
#define	RDSV3_WQ_THREAD_EXITING		3

/*
 * Worker thread: drain wq_queue, calling each work item's func outside
 * wq_lock, then drop back to IDLE (unless the queue is being torn down).
 * Dispatched onto rdsv3_taskq by rdsv3_queue_work().
 */
void
rdsv3_worker_thread(void *arg)
{
	rdsv3_workqueue_struct_t	*wq = arg;
	rdsv3_work_t			*work;

	RDSV3_DPRINTF4("rdsv3_worker_thread", "Enter(wq: 0x%p)", wq);

	mutex_enter(&wq->wq_lock);
	work = list_remove_head(&wq->wq_queue);
	while (work) {
		mutex_exit(&wq->wq_lock);

		/* process work (lock dropped so func may re-queue work) */
		work->func(work);

		mutex_enter(&wq->wq_lock);
		work = list_remove_head(&wq->wq_queue);
	}

	/* No more work, go home, until called again */
	if (wq->wq_state != RDSV3_WQ_THREAD_EXITING) {
		wq->wq_state = RDSV3_WQ_THREAD_IDLE;
	}
	mutex_exit(&wq->wq_lock);

	RDSV3_DPRINTF4("rdsv3_worker_thread", "Return(wq: 0x%p)", wq);
}

/*
 * Wait until all currently queued work has been processed.  If the
 * queue is EXITING, drain it synchronously in this thread instead.
 * XXX - polls at 1s granularity rather than using a CV.
 */
void
rdsv3_flush_workqueue(rdsv3_workqueue_struct_t *wq)
{
	RDSV3_DPRINTF4("rdsv3_flush_workqueue", "Enter(wq: %p)", wq);

	mutex_enter(&wq->wq_lock);
	switch (wq->wq_state) {
	case RDSV3_WQ_THREAD_IDLE:
		/* nothing to do */
		ASSERT(list_is_empty(&wq->wq_queue));
		break;

	case RDSV3_WQ_THREAD_RUNNING:
		wq->wq_state = RDSV3_WQ_THREAD_FLUSHING;
		/* FALLTHRU */
	case RDSV3_WQ_THREAD_FLUSHING:
		/* already flushing, wait until the flushing is complete */
		do {
			mutex_exit(&wq->wq_lock);
			delay(drv_usectohz(1000000));
			mutex_enter(&wq->wq_lock);
		} while (wq->wq_state == RDSV3_WQ_THREAD_FLUSHING);
		break;
	case RDSV3_WQ_THREAD_EXITING:
		/* taskq may be gone; drain the queue in this context */
		mutex_exit(&wq->wq_lock);
		rdsv3_worker_thread(wq);
		return;
	}
	mutex_exit(&wq->wq_lock);

	RDSV3_DPRINTF4("rdsv3_flush_workqueue", "Return(wq: %p)", wq);
}

/*
 * Queue `wp' on `wq' and make sure a worker is (or will be) running.
 * A work item already on the queue (active list link) is not re-queued.
 * If a flush is in progress, waits for it to finish first; if the queue
 * is EXITING, the item is silently dropped.
 */
void
rdsv3_queue_work(rdsv3_workqueue_struct_t *wq, rdsv3_work_t *wp)
{
	RDSV3_DPRINTF4("rdsv3_queue_work", "Enter(wq: %p, wp: %p)", wq, wp);

	mutex_enter(&wq->wq_lock);

	if (list_link_active(&wp->work_item)) {
		/* This is already in the queue, ignore this call */
		mutex_exit(&wq->wq_lock);
		RDSV3_DPRINTF3("rdsv3_queue_work", "already queued: %p", wp);
		return;
	}

	switch (wq->wq_state) {
	case RDSV3_WQ_THREAD_RUNNING:
		/* a worker is already active; it will pick this up */
		list_insert_tail(&wq->wq_queue, wp);
		mutex_exit(&wq->wq_lock);
		break;

	case RDSV3_WQ_THREAD_FLUSHING:
		/* let the in-progress flush complete before queueing */
		do {
			mutex_exit(&wq->wq_lock);
			delay(drv_usectohz(1000000));
			mutex_enter(&wq->wq_lock);
		} while (wq->wq_state == RDSV3_WQ_THREAD_FLUSHING);

		if (wq->wq_state == RDSV3_WQ_THREAD_RUNNING) {
			list_insert_tail(&wq->wq_queue, wp);
			mutex_exit(&wq->wq_lock);
			break;
		}
		/* FALLTHRU */

	case RDSV3_WQ_THREAD_IDLE:
		/* no worker active: queue the item and dispatch one */
		list_insert_tail(&wq->wq_queue, wp);
		wq->wq_state = RDSV3_WQ_THREAD_RUNNING;
		mutex_exit(&wq->wq_lock);

		(void) ddi_taskq_dispatch(rdsv3_taskq, rdsv3_worker_thread, wq,
		    DDI_SLEEP);
		break;

	case RDSV3_WQ_THREAD_EXITING:
		/* teardown in progress; drop the work item */
		mutex_exit(&wq->wq_lock);
		break;
	}

	RDSV3_DPRINTF4("rdsv3_queue_work", "Return(wq: %p, wp: %p)", wq, wp);
}

/*
 * timeout(9F) handler for delayed work queuing: clear the pending
 * timeout id, drop the wq_pending reference, and (unless the queue is
 * being torn down) hand the work item to rdsv3_queue_work().
 */
void
rdsv3_work_timeout_handler(void *arg)
{
	rdsv3_delayed_work_t *dwp = (rdsv3_delayed_work_t *)arg;

	RDSV3_DPRINTF4("rdsv3_work_timeout_handler",
	    "Enter(wq: %p, wp: %p)", dwp->wq, &dwp->work);

	mutex_enter(&dwp->lock);
	dwp->timeid = 0;
	mutex_exit(&dwp->lock);

	mutex_enter(&dwp->wq->wq_lock);
	dwp->wq->wq_pending--;
	if (dwp->wq->wq_state == RDSV3_WQ_THREAD_EXITING) {
		mutex_exit(&dwp->wq->wq_lock);
		return;
	}
	mutex_exit(&dwp->wq->wq_lock);

	rdsv3_queue_work(dwp->wq, &dwp->work);

	RDSV3_DPRINTF4("rdsv3_work_timeout_handler",
	    "Return(wq: %p, wp: %p)", dwp->wq, &dwp->work);
}

/*
 * Queue `dwp' on `wq' after `delay' seconds (immediately when delay is
 * 0).  wq_pending counts outstanding timeouts so teardown can wait for
 * them; a delayed work item with a timeout already armed is not re-armed.
 */
void
rdsv3_queue_delayed_work(rdsv3_workqueue_struct_t *wq,
    rdsv3_delayed_work_t *dwp, uint_t delay)
{
	RDSV3_DPRINTF4("rdsv3_queue_delayed_work",
	    "Enter(wq: %p, wp: %p)", wq, dwp);

	if (delay == 0) {
		rdsv3_queue_work(wq, &dwp->work);
		return;
	}

	mutex_enter(&wq->wq_lock);
	if (wq->wq_state == RDSV3_WQ_THREAD_EXITING) {
		mutex_exit(&wq->wq_lock);
		RDSV3_DPRINTF4("rdsv3_queue_delayed_work",
		    "WQ exiting - don't queue (wq: %p, wp: %p)", wq, dwp);
		return;
	}
	wq->wq_pending++;
	mutex_exit(&wq->wq_lock);

	mutex_enter(&dwp->lock);
	if (dwp->timeid == 0) {
		dwp->wq = wq;
		dwp->timeid = timeout(rdsv3_work_timeout_handler, dwp,
		    jiffies + (delay * rdsv3_one_sec_in_hz));
		mutex_exit(&dwp->lock);
	} else {
		/* already armed: undo the pending count taken above */
		mutex_exit(&dwp->lock);
		RDSV3_DPRINTF4("rdsv3_queue_delayed_work", "Already queued: %p",
		    dwp);
		mutex_enter(&wq->wq_lock);
		wq->wq_pending--;
		mutex_exit(&wq->wq_lock);
	}

	RDSV3_DPRINTF4("rdsv3_queue_delayed_work",
	    "Return(wq: %p, wp: %p)", wq, dwp);
}

/*
 * Cancel a pending delayed work item: untimeout(9F) the handler and
 * drop the wq_pending reference.  A no-op if no timeout is armed.
 */
void
rdsv3_cancel_delayed_work(rdsv3_delayed_work_t *dwp)
{
	RDSV3_DPRINTF4("rdsv3_cancel_delayed_work",
	    "Enter(wq: %p, dwp: %p)", dwp->wq, dwp);

	mutex_enter(&dwp->lock);
	if (dwp->timeid != 0) {
		(void) untimeout(dwp->timeid);
		dwp->timeid = 0;
	} else {
		RDSV3_DPRINTF4("rdsv3_cancel_delayed_work",
		    "Nothing to cancel (wq: %p, dwp: %p)", dwp->wq, dwp);
		mutex_exit(&dwp->lock);
		return;
	}
	mutex_exit(&dwp->lock);

	mutex_enter(&dwp->wq->wq_lock);
	dwp->wq->wq_pending--;
	mutex_exit(&dwp->wq->wq_lock);

	RDSV3_DPRINTF4("rdsv3_cancel_delayed_work",
	    "Return(wq: %p, dwp: %p)", dwp->wq, dwp);
}

/*
 * Tear down the workqueue created by rdsv3_create_task_workqueue():
 * mark it EXITING, wait (1s polls) for outstanding delayed-work
 * timeouts to drain, flush remaining queued work, then free the
 * queue and destroy the backing taskq.
 */
void
rdsv3_destroy_task_workqueue(rdsv3_workqueue_struct_t *wq)
{
	RDSV3_DPRINTF2("rdsv3_destroy_workqueue", "Enter");

	ASSERT(wq);

	mutex_enter(&wq->wq_lock);
	wq->wq_state = RDSV3_WQ_THREAD_EXITING;

	while (wq->wq_pending > 0) {
		mutex_exit(&wq->wq_lock);
		delay(drv_usectohz(1000000));
		mutex_enter(&wq->wq_lock);
	};
	mutex_exit(&wq->wq_lock);

	rdsv3_flush_workqueue(wq);

	list_destroy(&wq->wq_queue);
	mutex_destroy(&wq->wq_lock);
	kmem_free(wq, sizeof (rdsv3_workqueue_struct_t));

	ASSERT(rdsv3_taskq);
	ddi_taskq_destroy(rdsv3_taskq);

	/* NOTE(review): assigning the local parameter has no effect */
	wq = NULL;
	rdsv3_taskq = NULL;

	RDSV3_DPRINTF2("rdsv3_destroy_workqueue", "Return");
}

/*
 * Work-item wrapper that runs the one-time RDMA initialization.
 */
/* ARGSUSED */
void
rdsv3_rdma_init_worker(struct rdsv3_work_s *work)
{
	rdsv3_rdma_init();
}

#define	RDSV3_NUM_TASKQ_THREADS	1
/*
 * Create the single-threaded task workqueue backed by `rdsv3_taskq'.
 * Also initializes the module-global rdsv3_one_sec_in_hz.  Returns the
 * new queue, or NULL on allocation/taskq-creation failure.
 */
rdsv3_workqueue_struct_t *
rdsv3_create_task_workqueue(char *name)
{
	rdsv3_workqueue_struct_t	*wq;

	RDSV3_DPRINTF2("create_singlethread_workqueue", "Enter (dip: %p)",
	    rdsv3_dev_info);

	rdsv3_taskq = ddi_taskq_create(rdsv3_dev_info, name,
	    RDSV3_NUM_TASKQ_THREADS, TASKQ_DEFAULTPRI, 0);
	if (rdsv3_taskq == NULL) {
		RDSV3_DPRINTF2(__FILE__,
		    "ddi_taskq_create failed for rdsv3_taskq");
		return (NULL);
	}

	wq = kmem_zalloc(sizeof (rdsv3_workqueue_struct_t), KM_NOSLEEP);
	if (wq == NULL) {
		RDSV3_DPRINTF2(__FILE__, "kmem_zalloc failed for wq");
		ddi_taskq_destroy(rdsv3_taskq);
		return (NULL);
	}

	list_create(&wq->wq_queue, sizeof (struct rdsv3_work_s),
	    offsetof(struct rdsv3_work_s, work_item));
	mutex_init(&wq->wq_lock, NULL, MUTEX_DRIVER, NULL);
	wq->wq_state = RDSV3_WQ_THREAD_IDLE;
	wq->wq_pending = 0;
	rdsv3_one_sec_in_hz = drv_usectohz(1000000);

	RDSV3_DPRINTF2("create_singlethread_workqueue", "Return");

	return (wq);
}

/*
 * Implementation for struct sock
 */

/*
 * Final teardown of a dead RDS socket: destroy the embedded rdsv3_sock
 * lists/locks, the wait queue, and return the allocation to
 * rdsv3_alloc_cache.  Caller must have marked the socket SOCK_DEAD.
 */
void
rdsv3_sock_exit_data(struct rsock *sk)
{
	struct rdsv3_sock *rs = sk->sk_protinfo;

	RDSV3_DPRINTF4("rdsv3_sock_exit_data", "rs: %p sk: %p", rs, sk);

	ASSERT(rs != NULL);
	ASSERT(rdsv3_sk_sock_flag(sk, SOCK_DEAD));

	rs->rs_sk = NULL;

	list_destroy(&rs->rs_send_queue);
	list_destroy(&rs->rs_notify_queue);
	list_destroy(&rs->rs_recv_queue);

	rw_destroy(&rs->rs_recv_lock);
	mutex_destroy(&rs->rs_lock);

	mutex_destroy(&rs->rs_rdma_lock);
	avl_destroy(&rs->rs_rdma_keys);

	mutex_destroy(&rs->rs_conn_lock);
	mutex_destroy(&rs->rs_congested_lock);
	cv_destroy(&rs->rs_congested_cv);

	rdsv3_exit_waitqueue(sk->sk_sleep);
	kmem_free(sk->sk_sleep, sizeof (rdsv3_wait_queue_t));
	mutex_destroy(&sk->sk_lock);

	kmem_cache_free(rdsv3_alloc_cache, sk);
	RDSV3_DPRINTF4("rdsv3_sock_exit_data", "rs: %p sk: %p", rs, sk);
}

/* XXX - figure out right values */
#define	RDSV3_RECV_HIWATER	(256 * 1024)
#define	RDSV3_RECV_LOWATER	128
#define	RDSV3_XMIT_HIWATER	(256 * 1024)
#define	RDSV3_XMIT_LOWATER	1024

/*
 * Allocate and zero a combined rsock + rdsv3_sock object from
 * rdsv3_alloc_cache (the rdsv3_sock lives directly after the rsock;
 * see rdsv3_sock_init_data()).
 */
struct rsock *
rdsv3_sk_alloc()
{
	struct rsock *sk;

	sk = kmem_cache_alloc(rdsv3_alloc_cache, KM_SLEEP);
	if (sk == NULL) {
		/* defensive: KM_SLEEP allocations do not normally fail */
		RDSV3_DPRINTF2("rdsv3_create", "kmem_cache_alloc failed");
		return (NULL);
	}

	bzero(sk, sizeof (struct rsock) + sizeof (struct rdsv3_sock));
	return (sk);
}

/*
 * Initialize the generic socket fields of a freshly allocated rsock:
 * wait queue, lock, refcount, protinfo pointer (the trailing
 * rdsv3_sock) and default send/receive buffer sizes.
 */
void
rdsv3_sock_init_data(struct rsock *sk)
{
	sk->sk_sleep = kmem_zalloc(sizeof (rdsv3_wait_queue_t), KM_SLEEP);
	rdsv3_init_waitqueue(sk->sk_sleep);

	mutex_init(&sk->sk_lock, NULL, MUTEX_DRIVER, NULL);
	sk->sk_refcount = 1;
	/* rdsv3_sock is co-allocated immediately after the rsock */
	sk->sk_protinfo = (struct rdsv3_sock *)(sk + 1);
	sk->sk_sndbuf = RDSV3_XMIT_HIWATER;
	sk->sk_rcvbuf = RDSV3_RECV_HIWATER;
}

/*
 * Connection cache
 */
/*
 * kmem cache constructor for struct rdsv3_connection: zero the object
 * and set up its locks, sequence counters and message lists.
 */
/* ARGSUSED */
int
rdsv3_conn_constructor(void *buf, void *arg, int kmflags)
{
	struct rdsv3_connection *conn = buf;

	bzero(conn, sizeof (struct rdsv3_connection));

	conn->c_next_tx_seq = 1;
	mutex_init(&conn->c_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&conn->c_send_lock, NULL, MUTEX_DRIVER, NULL);
	conn->c_send_generation = 1;
	conn->c_senders = 0;

	list_create(&conn->c_send_queue, sizeof (struct rdsv3_message),
	    offsetof(struct rdsv3_message, m_conn_item));
	list_create(&conn->c_retrans, sizeof (struct rdsv3_message),
	    offsetof(struct rdsv3_message, m_conn_item));
	return (0);
}

/*
 * kmem cache destructor for struct rdsv3_connection; both message
 * lists must already be empty.
 */
/* ARGSUSED */
void
rdsv3_conn_destructor(void *buf, void *arg)
{
	struct rdsv3_connection *conn = buf;

	ASSERT(list_is_empty(&conn->c_send_queue));
	ASSERT(list_is_empty(&conn->c_retrans));
	list_destroy(&conn->c_send_queue);
	list_destroy(&conn->c_retrans);
	mutex_destroy(&conn->c_send_lock);
	mutex_destroy(&conn->c_lock);
}

/*
 * AVL comparator for the connection tree, ordering by (local address,
 * foreign address).  conn1 is an rdsv3_conn_info_t search key; conn2
 * is a struct rdsv3_connection tree node.
 */
int
rdsv3_conn_compare(const void *conn1, const void *conn2)
{
	uint32_be_t	laddr1, faddr1, laddr2, faddr2;

	laddr1 = ((rdsv3_conn_info_t *)conn1)->c_laddr;
	laddr2 = ((struct rdsv3_connection *)conn2)->c_laddr;

	if (laddr1 == laddr2) {
		faddr1 = ((rdsv3_conn_info_t *)conn1)->c_faddr;
		faddr2 = ((struct rdsv3_connection *)conn2)->c_faddr;
		if (faddr1 == faddr2)
			return (0);
		if (faddr1 < faddr2)
			return (-1);
		return (1);
	}

	if (laddr1 < laddr2)
		return (-1);

	return (1);
}

/* rdsv3_ib_incoming cache */
/* kmem cache constructor: initialize the fragment list head. */
/* ARGSUSED */
int
rdsv3_ib_inc_constructor(void *buf, void *arg, int kmflags)
{
	list_create(&((struct rdsv3_ib_incoming *)buf)->ii_frags,
	    sizeof (struct rdsv3_page_frag),
	    offsetof(struct rdsv3_page_frag, f_item));

	return (0);
}

/* kmem cache destructor: tear down the fragment list head. */
/* ARGSUSED */
void
rdsv3_ib_inc_destructor(void *buf, void *arg)
{
	list_destroy(&((struct rdsv3_ib_incoming *)buf)->ii_frags);
}

/* ib_frag_slab cache */
/*
 * kmem cache constructor for receive page fragments: allocate a page
 * and IBTF-map it for receive (filling frag->f_sge/f_mapped).  `arg'
 * is the owning struct rdsv3_ib_device.  Returns 0 on success, -1 on
 * allocation or mapping failure.
 */
/* ARGSUSED */
int
rdsv3_ib_frag_constructor(void *buf, void *arg, int kmflags)
{
	struct rdsv3_page_frag *frag = (struct rdsv3_page_frag *)buf;
	struct rdsv3_ib_device *rds_ibdev = (struct rdsv3_ib_device *)arg;
	ibt_iov_attr_t iov_attr;
	ibt_iov_t iov_arr[1];
	ibt_all_wr_t wr;

	bzero(frag, sizeof (struct rdsv3_page_frag));
	list_link_init(&frag->f_item);

	frag->f_page = kmem_alloc(PAGE_SIZE, kmflags);
	if (frag->f_page == NULL) {
		RDSV3_DPRINTF2("rdsv3_ib_frag_constructor",
		    "kmem_alloc for %d failed", PAGE_SIZE);
		return (-1);
	}
	frag->f_offset = 0;

	iov_attr.iov_as = NULL;
	iov_attr.iov = &iov_arr[0];
	iov_attr.iov_buf = NULL;
	iov_attr.iov_list_len = 1;
	iov_attr.iov_wr_nds = 1;
	iov_attr.iov_lso_hdr_sz = 0;
	iov_attr.iov_flags = IBT_IOV_SLEEP | IBT_IOV_RECV;

	iov_arr[0].iov_addr = frag->f_page;
	iov_arr[0].iov_len = PAGE_SIZE;

	wr.recv.wr_nds = 1;
	wr.recv.wr_sgl = &frag->f_sge;

	if (ibt_map_mem_iov(ib_get_ibt_hca_hdl(rds_ibdev->dev),
	    &iov_attr, &wr, &frag->f_mapped) != IBT_SUCCESS) {
		RDSV3_DPRINTF2("rdsv3_ib_frag_constructor",
		    "ibt_map_mem_iov failed");
		kmem_free(frag->f_page, PAGE_SIZE);
		return (-1);
	}

	return (0);
}

/*
 * kmem cache destructor for receive page fragments: unmap and free
 * the page allocated by rdsv3_ib_frag_constructor().
 */
/* ARGSUSED */
void
rdsv3_ib_frag_destructor(void *buf, void *arg)
{
	struct rdsv3_page_frag *frag = (struct rdsv3_page_frag *)buf;
	struct rdsv3_ib_device *rds_ibdev = (struct rdsv3_ib_device *)arg;

	/* unmap the page */
	if (ibt_unmap_mem_iov(ib_get_ibt_hca_hdl(rds_ibdev->dev),
	    frag->f_mapped) != IBT_SUCCESS)
		RDSV3_DPRINTF2("rdsv3_ib_frag_destructor",
		    "ibt_unmap_mem_iov failed");

	/* free the page */
	kmem_free(frag->f_page, PAGE_SIZE);
}

/* loop.c */
extern kmutex_t loop_conns_lock;
extern list_t loop_conns;

struct rdsv3_loop_connection
{
	struct list_node loop_node;
	struct rdsv3_connection *conn;
};

/*
 * Initialize the loopback-transport connection list and its lock
 * (both defined in loop.c).
 */
void
rdsv3_loop_init(void)
{
	list_create(&loop_conns, sizeof (struct rdsv3_loop_connection),
	    offsetof(struct rdsv3_loop_connection, loop_node));
	mutex_init(&loop_conns_lock, NULL, MUTEX_DRIVER, NULL);
}

/* rdma.c */
/* IB Rkey is used here for comparison */
/*
 * AVL comparator for the per-socket MR tree: mr1 is a bare uint32_t
 * R_Key search key, mr2 a struct rdsv3_mr tree node.
 */
int
rdsv3_mr_compare(const void *mr1, const void *mr2)
{
	uint32_t key1 = *(uint32_t *)mr1;
	uint32_t key2 = ((struct rdsv3_mr *)mr2)->r_key;

	if (key1 < key2)
		return (-1);
	if (key1 > key2)
		return (1);
	return (0);
}

/* transport.c */
extern struct rdsv3_transport *transports[];
extern krwlock_t trans_sem;

/*
 * Shut down the (single) registered transport and destroy the
 * transport list lock.  The transport's exit() callback removes it
 * from transports[] itself, so the lock is only held for the lookup.
 */
void
rdsv3_trans_exit(void)
{
	struct rdsv3_transport *trans;
	int i;

	RDSV3_DPRINTF2("rdsv3_trans_exit", "Enter");

	/* currently, only IB transport */
	rw_enter(&trans_sem, RW_READER);
	trans = NULL;
	for (i = 0; i < RDS_TRANS_COUNT; i++) {
		if (transports[i]) {
			trans = transports[i];
			break;
		}
	}
	rw_exit(&trans_sem);

	/* trans->exit() will remove the trans from the list */
	if (trans)
		trans->exit();

	rw_destroy(&trans_sem);

	RDSV3_DPRINTF2("rdsv3_trans_exit", "Return");
}

/* Initialize the transport list lock (counterpart of rdsv3_trans_exit). */
void
rdsv3_trans_init()
{
	RDSV3_DPRINTF2("rdsv3_trans_init", "Enter");

	rw_init(&trans_sem, NULL, RW_DRIVER, NULL);

	RDSV3_DPRINTF2("rdsv3_trans_init", "Return");
}

/*
 * Append a control message of (level, type) carrying `size' bytes of
 * `payload' to msg->msg_control, growing the buffer by reallocating
 * and copying any existing cmsgs.  A NULL msg or zero msg_controllen
 * is a no-op.  Always returns 0.
 */
int
rdsv3_put_cmsg(struct nmsghdr *msg, int level, int type, size_t size,
    void *payload)
{
	struct cmsghdr	*cp;
	char		*bp;
	size_t		cmlen;
	size_t		cmspace;
	size_t		bufsz;

	RDSV3_DPRINTF4("rdsv3_put_cmsg",
	    "Enter(msg: %p level: %d type: %d sz: %d)",
	    msg, level, type, size);

	if (msg == NULL || msg->msg_controllen == 0) {
		return (0);
	}
	/* check for first cmsg or this is another cmsg to be appended */
	if (msg->msg_control == NULL)
		msg->msg_controllen = 0;

	cmlen = CMSG_LEN(size);
	cmspace = CMSG_SPACE(size);
	bufsz = msg->msg_controllen + cmspace;

	/* extend the existing cmsg to append the next cmsg */
	bp = kmem_alloc(bufsz, KM_SLEEP);
	if (msg->msg_control) {
		bcopy(msg->msg_control, bp, msg->msg_controllen);
		kmem_free(msg->msg_control, (size_t)msg->msg_controllen);
	}

	/* assign payload the proper cmsg location */
	cp = (struct cmsghdr *)(bp + msg->msg_controllen);
	cp->cmsg_len = cmlen;
	cp->cmsg_level = level;
	cp->cmsg_type = type;

	/* copy exactly the payload bytes (cmsg_len minus the header) */
	bcopy(payload, CMSG_DATA(cp), cmlen -
	    (unsigned int)_CMSG_DATA_ALIGN(sizeof (struct cmsghdr)));

	msg->msg_control = bp;
	msg->msg_controllen = bufsz;

	RDSV3_DPRINTF4("rdsv3_put_cmsg", "Return(cmsg_len: %d)", cp->cmsg_len);

	return (0);
}

/* Bind-address validation stub: every address is currently accepted. */
/* ARGSUSED */
int
rdsv3_verify_bind_address(ipaddr_t addr)
{
	return (1);
}

/* checksum */
/*
 * One's-complement checksum of `hdr'.  `length' is doubled before the
 * ip_ocsum() call, whose count argument is in 16-bit halfwords —
 * NOTE(review): this implies `length' is in 32-bit words; confirm
 * against callers before changing.
 */
uint16_t
rdsv3_ip_fast_csum(void *hdr, size_t length)
{
	return (0xffff &
	    (uint16_t)(~ip_ocsum((ushort_t *)hdr, (int)length <<1, 0)));
}

/* scatterlist implementation */
/* Stub: DMA addresses are not exposed this way on this platform. */
/* ARGSUSED */
caddr_t
rdsv3_ib_sg_dma_address(ib_device_t *dev, struct rdsv3_scatterlist *scat,
    uint_t offset)
{
	return (0);
}

/*
 * IBTF-map the `num'-entry scatterlist `scat' for sending.  The mapping
 * handle is stored in the first entry (scat[0].mihdl) and each entry's
 * sgl pointer is aimed into one shared ibt_wr_ds_t array (2 slots per
 * entry), freed later by rdsv3_ib_dma_unmap_sg().  Returns num on
 * success, 0 on mapping failure.
 */
uint_t
rdsv3_ib_dma_map_sg(struct ib_device *dev, struct rdsv3_scatterlist *scat,
    uint_t num)
{
	struct rdsv3_scatterlist	*s, *first;
	ibt_iov_t			*iov;
	ibt_wr_ds_t			*sgl;
	ibt_iov_attr_t			iov_attr;
	ibt_send_wr_t			swr;
	uint_t				i;

	RDSV3_DPRINTF4("rdsv3_ib_dma_map_sg", "scat %p, num: %d", scat, num);

	s = first = &scat[0];
	ASSERT(first->mihdl == NULL);

	iov = kmem_alloc(num * sizeof (ibt_iov_t), KM_SLEEP);
	sgl = kmem_zalloc((num * 2) * sizeof (ibt_wr_ds_t), KM_SLEEP);

	for (i = 0; i < num; i++, s++) {
		iov[i].iov_addr = s->vaddr;
		iov[i].iov_len = s->length;
	}

	iov_attr.iov_as = NULL;
	iov_attr.iov = iov;
	iov_attr.iov_buf = NULL;
	iov_attr.iov_list_len = num;
	iov_attr.iov_wr_nds = num * 2;
	iov_attr.iov_lso_hdr_sz = 0;
	iov_attr.iov_flags = IBT_IOV_SLEEP;

	swr.wr_sgl = sgl;

	/* `i' is reused here to hold the IBTF return status */
	i = ibt_map_mem_iov(ib_get_ibt_hca_hdl(dev),
	    &iov_attr, (ibt_all_wr_t *)&swr, &first->mihdl);
	kmem_free(iov, num * sizeof (ibt_iov_t));
	if (i != IBT_SUCCESS) {
		/*
		 * NOTE(review): `sgl' is not freed on this path —
		 * looks like a memory leak; confirm and fix upstream.
		 */
		RDSV3_DPRINTF2("rdsv3_ib_dma_map_sg",
		    "ibt_map_mem_iov returned: %d", i);
		return (0);
	}

	s = first;
	for (i = 0; i < num; i++, s++, sgl++) {
		s->sgl = sgl;
	}

	return (num);
}

/*
 * Undo rdsv3_ib_dma_map_sg(): unmap the IBTF handle stored in the
 * first scatterlist entry and free the shared sgl array.
 */
void
rdsv3_ib_dma_unmap_sg(ib_device_t *dev, struct rdsv3_scatterlist *scat,
    uint_t num)
{
	/* Zero length messages have no scatter gather entries */
	if (num != 0) {
		ASSERT(scat->mihdl != NULL);
		ASSERT(scat->sgl != NULL);

		(void) ibt_unmap_mem_iov(ib_get_ibt_hca_hdl(dev), scat->mihdl);

		kmem_free(scat->sgl, (num * 2) * sizeof (ibt_wr_ds_t));
		scat->sgl = NULL;
		scat->mihdl = NULL;
	}
}

/*
 * Allocate and register one contiguous buffer holding the send, recv
 * and ack rdsv3_headers for connection `ic', and populate the header
 * pointers and "dma" (here: kernel virtual) addresses.  Returns 0 on
 * success, -1 on allocation or registration failure.
 */
int
rdsv3_ib_alloc_hdrs(ib_device_t *dev, struct rdsv3_ib_connection *ic)
{
	caddr_t addr;
	size_t size;
	ibt_mr_attr_t mr_attr;
	ibt_mr_desc_t mr_desc;
	ibt_mr_hdl_t mr_hdl;
	int ret;

	RDSV3_DPRINTF4("rdsv3_ib_alloc_hdrs", "Enter(dev: %p)", dev);

	ASSERT(ic->i_mr == NULL);

	/* one header per send slot, per recv slot, plus one for the ack */
	size = (ic->i_send_ring.w_nr + ic->i_recv_ring.w_nr + 1) *
	    sizeof (struct rdsv3_header);

	addr = kmem_zalloc(size, KM_NOSLEEP);
	if (addr == NULL)
		return (-1);

	mr_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)addr;
	mr_attr.mr_len = size;
	mr_attr.mr_as = NULL;
	mr_attr.mr_flags = IBT_MR_ENABLE_LOCAL_WRITE;
	ret = ibt_register_mr(ib_get_ibt_hca_hdl(dev), RDSV3_PD2PDHDL(ic->i_pd),
	    &mr_attr, &mr_hdl, &mr_desc);
	if (ret != IBT_SUCCESS) {
		/*
		 * NOTE(review): `addr' (size bytes) is not freed on this
		 * path — looks like a memory leak; confirm and fix.
		 */
		RDSV3_DPRINTF2("rdsv3_ib_alloc_hdrs",
		    "ibt_register_mr returned: " "%d", ret);
		return (-1);
	}

	ic->i_mr =
	    (struct rdsv3_hdrs_mr *)kmem_alloc(sizeof (struct rdsv3_hdrs_mr),
	    KM_SLEEP);
	ic->i_mr->addr = addr;
	ic->i_mr->size = size;
	ic->i_mr->hdl = mr_hdl;
	ic->i_mr->lkey = mr_desc.md_lkey;

	/* layout: [send hdrs][recv hdrs][ack hdr] within the one buffer */
	ic->i_send_hdrs = (struct rdsv3_header *)addr;
	ic->i_send_hdrs_dma = (uint64_t)(uintptr_t)addr;

	ic->i_recv_hdrs = (struct rdsv3_header *)(addr +
	    (ic->i_send_ring.w_nr * sizeof (struct rdsv3_header)));
	ic->i_recv_hdrs_dma = (uint64_t)(uintptr_t)(addr +
	    (ic->i_send_ring.w_nr * sizeof (struct rdsv3_header)));

	ic->i_ack = (struct rdsv3_header *)(addr +
	    ((ic->i_send_ring.w_nr + ic->i_recv_ring.w_nr) *
	    sizeof (struct rdsv3_header)));
	ic->i_ack_dma = (uint64_t)(uintptr_t)(addr +
	    ((ic->i_send_ring.w_nr + ic->i_recv_ring.w_nr) *
	    sizeof (struct rdsv3_header)));

	RDSV3_DPRINTF4("rdsv3_ib_alloc_hdrs", "Return(dev: %p)", dev);

	return (0);
}

/*
 * Undo rdsv3_ib_alloc_hdrs(): clear the header pointers, deregister
 * the MR, and free both the header buffer and the bookkeeping struct.
 */
void
rdsv3_ib_free_hdrs(ib_device_t *dev, struct rdsv3_ib_connection *ic)
{
	RDSV3_DPRINTF4("rdsv3_ib_free_hdrs", "Enter(dev: %p)", dev);
	ASSERT(ic->i_mr != NULL);

	ic->i_send_hdrs = NULL;
	ic->i_send_hdrs_dma = 0;

	ic->i_recv_hdrs = NULL;
	ic->i_recv_hdrs_dma = 0;

	ic->i_ack = NULL;
	ic->i_ack_dma = 0;

	(void) ibt_deregister_mr(ib_get_ibt_hca_hdl(dev), ic->i_mr->hdl);

	kmem_free(ic->i_mr->addr, ic->i_mr->size);
	kmem_free(ic->i_mr, sizeof (struct rdsv3_hdrs_mr));

	ic->i_mr = NULL;
	RDSV3_DPRINTF4("rdsv3_ib_free_hdrs", "Return(dev: %p)", dev);
}

/*
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
int
atomic_add_unless(atomic_t *v, uint_t a, ulong_t u)
{
	uint_t c, old;

	/* classic CAS loop: retry until *v is u or the add lands */
	c = *v;
	while (c != u && (old = atomic_cas_uint(v, c, c + a)) != c) {
		c = old;
	}
	return ((ulong_t)c != u);
}