/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ipoib.h"
#include <sys/eventhandler.h>

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_vlan.h>

#include <net/infiniband.h>

#include <rdma/ib_cache.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level = 1;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct ipoib_dev_priv *priv;
	struct ipoib_path path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u8 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data);
static void ipoib_start(struct ifnet *dev);
static int ipoib_ioctl(struct ifnet *ifp, u_long command, caddr_t data);

static struct unrhdr *ipoib_unrhdr;

static void
ipoib_unrhdr_init(void *arg)
{

	ipoib_unrhdr = new_unrhdr(0, 65535, NULL);
}
SYSINIT(ipoib_unrhdr_init, SI_SUB_KLD - 1, SI_ORDER_ANY, ipoib_unrhdr_init, NULL);

static void
ipoib_unrhdr_uninit(void *arg)
{

	if (ipoib_unrhdr != NULL) {
		struct unrhdr *hdr;

		hdr = ipoib_unrhdr;
		ipoib_unrhdr = NULL;

		delete_unrhdr(hdr);
	}
}
SYSUNINIT(ipoib_unrhdr_uninit, SI_SUB_KLD - 1, SI_ORDER_ANY, ipoib_unrhdr_uninit, NULL);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one,
	.get_net_dev_by_params = ipoib_get_net_dev_by_params,
};

int
ipoib_open(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;

	ipoib_dbg(priv, "bringing up interface\n");

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(priv))
		return 0;

	if (ipoib_ib_dev_open(priv))
		goto err_disable;

	if (ipoib_ib_dev_up(priv))
		goto err_stop;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list)
			if ((cpriv->dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ipoib_open(cpriv);
		mutex_unlock(&priv->vlan_mutex);
	}
	dev->if_drv_flags |= IFF_DRV_RUNNING;
	dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return 0;

err_stop:
	ipoib_ib_dev_stop(priv, 1);

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}

static void
ipoib_init(void *arg)
{
	struct ifnet *dev;
	struct ipoib_dev_priv *priv;

	priv = arg;
	dev = priv->dev;
	if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
		ipoib_open(priv);
	queue_work(ipoib_workqueue, &priv->flush_light);
}

static int
ipoib_stop(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ipoib_ib_dev_down(priv, 0);
	ipoib_ib_dev_stop(priv, 0);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list)
			if ((cpriv->dev->if_drv_flags & IFF_DRV_RUNNING) != 0)
				ipoib_stop(cpriv);
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}

static int
ipoib_propagate_ifnet_mtu(struct ipoib_dev_priv *priv, int new_mtu,
    bool propagate)
{
	struct ifnet *ifp;
	struct ifreq ifr;
	int error;

	ifp = priv->dev;
	if (ifp->if_mtu == new_mtu)
		return (0);
	if (propagate) {
		strlcpy(ifr.ifr_name, if_name(ifp), IFNAMSIZ);
		ifr.ifr_mtu = new_mtu;
		CURVNET_SET(ifp->if_vnet);
		error = ifhwioctl(SIOCSIFMTU, ifp, (caddr_t)&ifr, curthread);
		CURVNET_RESTORE();
	} else {
		ifp->if_mtu = new_mtu;
		error = 0;
	}
	return (error);
}

int
ipoib_change_mtu(struct ipoib_dev_priv *priv, int new_mtu, bool propagate)
{
	int error, prev_admin_mtu;

	/* dev->if_mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(priv)) {
		if (new_mtu > IPOIB_CM_MTU(ipoib_cm_max_mtu(priv)))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
			    priv->mcast_mtu);

		return (ipoib_propagate_ifnet_mtu(priv, new_mtu, propagate));
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	prev_admin_mtu = priv->admin_mtu;
	priv->admin_mtu = new_mtu;
	error = ipoib_propagate_ifnet_mtu(priv, min(priv->mcast_mtu,
	    priv->admin_mtu), propagate);
	if (error == 0) {
		/* check for MTU change to avoid infinite loop */
		if (prev_admin_mtu != new_mtu)
			queue_work(ipoib_workqueue, &priv->flush_light);
	} else
		priv->admin_mtu = prev_admin_mtu;
	return (error);
}

static int
ipoib_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ipoib_dev_priv *priv = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;

	/* check if detaching */
	if (priv == NULL || priv->gone != 0)
		return (ENXIO);

	switch (command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				error = -ipoib_open(priv);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ipoib_stop(priv);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			queue_work(ipoib_workqueue, &priv->restart_task);
		break;
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ifp->if_init(ifp->if_softc);	/* before arpwhohas */
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			ifp->if_init(ifp->if_softc);
			break;
		}
		break;

	case SIOCGIFADDR:
		bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
		    INFINIBAND_ALEN);
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		error = -ipoib_change_mtu(priv, ifr->ifr_mtu, false);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static struct ipoib_path *
__path_find(struct ipoib_dev_priv *priv, void *gid)
{
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
		    sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int
__path_add(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
		    sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

void
ipoib_path_free(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{

	_IF_DRAIN(&path->queue);

	if (path->ah)
		ipoib_put_ah(path->ah);
	if (ipoib_cm_get(path))
		ipoib_cm_destroy_tx(ipoib_cm_get(path));

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *
ipoib_path_iter_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->priv = priv;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int
ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = iter->priv;
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
		    sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void
ipoib_path_iter_read(struct ipoib_path_iter *iter, struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void
ipoib_mark_paths_invalid(struct ipoib_dev_priv *priv)
{
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %16D invalid\n",
		    be16_to_cpu(path->pathrec.dlid),
		    path->pathrec.dgid.raw, ":");
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}

void
ipoib_flush_paths(struct ipoib_dev_priv *priv)
{
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		wait_for_completion(&path->done);
		ipoib_path_free(priv, path);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

/*
 * Callback invoked by the SA client when a path record query completes;
 * on success, build an address handle and retransmit any mbufs that were
 * queued while the path was being resolved.
 */
static void
path_rec_completion(int status, struct ib_sa_path_rec *pathrec, void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct ipoib_dev_priv *priv = path->priv;
	struct ifnet *dev = priv->dev;
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct epoch_tracker et;
	struct ifqueue mbqueue;
	struct mbuf *mb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %16D\n",
		    be16_to_cpu(pathrec->dlid), pathrec->dgid.raw, ":");
	else
		ipoib_dbg(priv, "PathRec status %d for GID %16D\n",
		    status, path->pathrec.dgid.raw, ":");

	bzero(&mbqueue, sizeof(mbqueue));

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(priv, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (ah) {
		path->pathrec = *pathrec;

		old_ah = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
		    ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		for (;;) {
			_IF_DEQUEUE(&path->queue, mb);
			if (mb == NULL)
				break;
			_IF_ENQUEUE(&mbqueue, mb);
		}

#ifdef CONFIG_INFINIBAND_IPOIB_CM
		if (ipoib_cm_enabled(priv, path->hwaddr) && !ipoib_cm_get(path))
			ipoib_cm_set(path, ipoib_cm_create_tx(priv, path));
#endif

		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (old_ah)
		ipoib_put_ah(old_ah);

	NET_EPOCH_ENTER(et);
	for (;;) {
		_IF_DEQUEUE(&mbqueue, mb);
		if (mb == NULL)
			break;
		mb->m_pkthdr.rcvif = dev;
		if (dev->if_transmit(dev, mb))
			ipoib_warn(priv, "dev_queue_xmit failed "
			    "to requeue packet\n");
	}
	NET_EPOCH_EXIT(et);
}

static struct ipoib_path *
path_rec_create(struct ipoib_dev_priv *priv, uint8_t *hwaddr)
{
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->priv = priv;

	bzero(&path->queue, sizeof(path->queue));

#ifdef CONFIG_INFINIBAND_IPOIB_CM
	memcpy(&path->hwaddr, hwaddr, INFINIBAND_ALEN);
#endif
	memcpy(path->pathrec.dgid.raw, &hwaddr[4], sizeof (union ib_gid));
	path->pathrec.sgid = priv->local_gid;
	path->pathrec.pkey = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}

static int
path_rec_start(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{
	struct ifnet *dev = priv->dev;

	ib_sa_comp_mask comp_mask = IB_SA_PATH_REC_MTU_SELECTOR | IB_SA_PATH_REC_MTU;
	struct ib_sa_path_rec p_rec;

	p_rec = path->pathrec;
	p_rec.mtu_selector = IB_SA_GT;

	switch (roundup_pow_of_two(dev->if_mtu + IPOIB_ENCAP_LEN)) {
	case 512:
		p_rec.mtu = IB_MTU_256;
		break;
	case 1024:
		p_rec.mtu = IB_MTU_512;
		break;
	case 2048:
		p_rec.mtu = IB_MTU_1024;
		break;
	case 4096:
		p_rec.mtu = IB_MTU_2048;
		break;
	default:
		/* Wildcard everything */
		comp_mask = 0;
		p_rec.mtu = 0;
		p_rec.mtu_selector = 0;
	}

	ipoib_dbg(priv, "Start path record lookup for %16D MTU > %d\n",
	    p_rec.dgid.raw, ":",
	    comp_mask ? ib_mtu_enum_to_int(p_rec.mtu) : 0);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &p_rec, comp_mask |
				   IB_SA_PATH_REC_DGID |
				   IB_SA_PATH_REC_SGID |
				   IB_SA_PATH_REC_NUMB_PATH |
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}

/*
 * Resolve (or start resolving) the path for a unicast destination and
 * transmit the mbuf, queueing it while a path record query is pending.
 */
static void
ipoib_unicast_send(struct mbuf *mb, struct ipoib_dev_priv *priv, struct ipoib_header *eh)
{
	struct ipoib_path *path;

	path = __path_find(priv, eh->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(priv, eh->hwaddr);
			new_path = 1;
		}
		if (path) {
			if (_IF_QLEN(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE)
				_IF_ENQUEUE(&path->queue, mb);
			else {
				if_inc_counter(priv->dev, IFCOUNTER_OERRORS, 1);
				m_freem(mb);
			}

			if (!path->query && path_rec_start(priv, path)) {
				if (new_path)
					ipoib_path_free(priv, path);
				return;
			} else
				__path_add(priv, path);
		} else {
			if_inc_counter(priv->dev, IFCOUNTER_OERRORS, 1);
			m_freem(mb);
		}

		return;
	}

	if (ipoib_cm_get(path) && ipoib_cm_up(path)) {
		ipoib_cm_send(priv, mb, ipoib_cm_get(path));
	} else if (path->ah) {
		ipoib_send(priv, mb, path->ah, IPOIB_QPN(eh->hwaddr));
	} else if ((path->query || !path_rec_start(priv, path)) &&
	    path->queue.ifq_len < IPOIB_MAX_PATH_REC_QUEUE) {
		_IF_ENQUEUE(&path->queue, mb);
	} else {
		if_inc_counter(priv->dev, IFCOUNTER_OERRORS, 1);
		m_freem(mb);
	}
}

static int
ipoib_send_one(struct ipoib_dev_priv *priv, struct mbuf *mb)
{
	struct ipoib_header *eh;

	eh = mtod(mb, struct ipoib_header *);
	if (IPOIB_IS_MULTICAST(eh->hwaddr)) {
		/* Add in the P_Key for multicast */
		eh->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		eh->hwaddr[9] = priv->pkey & 0xff;

		ipoib_mcast_send(priv, eh->hwaddr + 4, mb);
	} else
		ipoib_unicast_send(mb, priv, eh);

	return 0;
}

void
ipoib_start_locked(struct ifnet *dev, struct ipoib_dev_priv *priv)
{
	struct mbuf *mb;

	assert_spin_locked(&priv->lock);

	while (!IFQ_DRV_IS_EMPTY(&dev->if_snd) &&
	    (dev->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
		IFQ_DRV_DEQUEUE(&dev->if_snd, mb);
		if (mb == NULL)
			break;
		INFINIBAND_BPF_MTAP(dev, mb);
		ipoib_send_one(priv, mb);
	}
}

static void
_ipoib_start(struct ifnet *dev, struct ipoib_dev_priv *priv)
{

	if ((dev->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	spin_lock(&priv->lock);
	ipoib_start_locked(dev, priv);
	spin_unlock(&priv->lock);
}

static void
ipoib_start(struct ifnet *dev)
{
	_ipoib_start(dev, dev->if_softc);
}

static void
ipoib_vlan_start(struct ifnet *dev)
{
	struct ipoib_dev_priv *priv;
	struct mbuf *mb;

	priv = VLAN_COOKIE(dev);
	if (priv != NULL)
		return _ipoib_start(dev, priv);
	while (!IFQ_DRV_IS_EMPTY(&dev->if_snd)) {
		IFQ_DRV_DEQUEUE(&dev->if_snd, mb);
		if (mb == NULL)
			break;
		m_freem(mb);
		if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
	}
}

int
ipoib_dev_init(struct ipoib_dev_priv *priv, struct ib_device *ca, int port)
{

	/* Allocate RX/TX "rings" to hold queued mbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
	    GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		    ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring, GFP_KERNEL);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		    ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}
	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(priv, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	kfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

static void
ipoib_detach(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev;

	dev = priv->dev;
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		priv->gone = 1;
		infiniband_ifdetach(dev);
		if_free(dev);
		free_unr(ipoib_unrhdr, priv->unit);
	} else
		VLAN_SETCOOKIE(priv->dev, NULL);

	free(priv, M_TEMP);
}

void
ipoib_dev_cleanup(struct ipoib_dev_priv *priv)
{
	struct ipoib_dev_priv *cpriv, *tcpriv;

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		ipoib_dev_cleanup(cpriv);
		ipoib_detach(cpriv);
	}

	ipoib_ib_dev_cleanup(priv);

	kfree(priv->rx_ring);
	kfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static struct ipoib_dev_priv *
ipoib_priv_alloc(void)
{
	struct ipoib_dev_priv *priv;

	priv = malloc(sizeof(struct ipoib_dev_priv), M_TEMP, M_ZERO|M_WAITOK);
	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->drain_lock);
	mutex_init(&priv->vlan_mutex);
	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);
	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	memcpy(priv->broadcastaddr, ipv4_bcast_addr, INFINIBAND_ALEN);

	return (priv);
}

struct ipoib_dev_priv *
ipoib_intf_alloc(const char *name)
{
	struct ipoib_dev_priv *priv;
	struct ifnet *dev;

	priv = ipoib_priv_alloc();
	dev = priv->dev = if_alloc(IFT_INFINIBAND);
	if (!dev) {
		free(priv, M_TEMP);
		return NULL;
	}
	dev->if_softc = priv;
	priv->unit = alloc_unr(ipoib_unrhdr);
	if (priv->unit == -1) {
		if_free(dev);
		free(priv, M_TEMP);
		return NULL;
	}
	if_initname(dev, name, priv->unit);
	dev->if_flags = IFF_BROADCAST | IFF_MULTICAST;

	infiniband_ifattach(dev, NULL, priv->broadcastaddr);

	dev->if_init = ipoib_init;
	dev->if_ioctl = ipoib_ioctl;
	dev->if_start = ipoib_start;

	dev->if_snd.ifq_maxlen = ipoib_sendq_size * 2;

	priv->dev = dev;
	if_link_state_change(dev, LINK_STATE_DOWN);

	return dev->if_softc;
}

int
ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	struct ib_device_attr *device_attr = &hca->attrs;

	priv->hca_caps = device_attr->device_cap_flags;

	priv->dev->if_hwassist = 0;
	priv->dev->if_capabilities = 0;

#ifndef CONFIG_INFINIBAND_IPOIB_CM
	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
		priv->dev->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
		priv->dev->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
	}

#if 0
	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO) {
		priv->dev->if_capabilities |= IFCAP_TSO4;
		priv->dev->if_hwassist |= CSUM_TSO;
	}
#endif
#endif
	priv->dev->if_capabilities |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_LINKSTATE;
	priv->dev->if_capenable = priv->dev->if_capabilities;

	return 0;
}

static struct ifnet *
ipoib_add_port(const char *format, struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	if (!ib_query_port(hca, port, &attr))
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		    hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->if_mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->if_mtu;

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		    hca->name, port, result);
		goto device_init_failed;
	}

	if (ipoib_set_dev_features(priv, hca))
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->broadcastaddr[8] = priv->pkey >> 8;
	priv->broadcastaddr[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		    hca->name, port, result);
		goto device_init_failed;
	}
	memcpy(IF_LLADDR(priv->dev) + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		    hca->name, port, result);
		goto device_init_failed;
	}
	if (ipoib_cm_admin_enabled(priv))
		priv->dev->if_mtu = IPOIB_CM_MTU(ipoib_cm_max_mtu(priv));

	INIT_IB_EVENT_HANDLER(&priv->event_handler, priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		    "port %d (ret = %d)\n",
		    hca->name, port, result);
		goto event_failed;
	}
	if_printf(priv->dev, "Attached to %s port %d\n", hca->name, port);

	return priv->dev;

event_failed:
	ipoib_dev_cleanup(priv);

device_init_failed:
	ipoib_detach(priv);

alloc_mem_failed:
	return ERR_PTR(result);
}

static void
ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct ifnet *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
			continue;
		dev = ipoib_add_port("ib", device, p);
		if (!IS_ERR(dev)) {
			priv = dev->if_softc;
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

static void
ipoib_remove_one(struct ib_device *device, void *client_data)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list = client_data;

	if (!dev_list)
		return;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		if (rdma_port_get_link_layer(device, priv->port) != IB_LINK_LAYER_INFINIBAND)
			continue;

		ipoib_stop(priv);

		ib_unregister_event_handler(&priv->event_handler);

		/* dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); */

		flush_workqueue(ipoib_workqueue);

		ipoib_dev_cleanup(priv);
		ipoib_detach(priv);
	}

	kfree(dev_list);
}

static int
ipoib_match_dev_addr(const struct sockaddr *addr, struct net_device *dev)
{
	struct epoch_tracker et;
	struct ifaddr *ifa;
	int retval = 0;

	NET_EPOCH_ENTER(et);
	CK_STAILQ_FOREACH(ifa, &dev->if_addrhead, ifa_link) {
		if (ifa->ifa_addr == NULL ||
		    ifa->ifa_addr->sa_family != addr->sa_family ||
		    ifa->ifa_addr->sa_len != addr->sa_len) {
			continue;
		}
		if (memcmp(ifa->ifa_addr, addr, addr->sa_len) == 0) {
			retval = 1;
			break;
		}
	}
	NET_EPOCH_EXIT(et);

	return (retval);
}

/*
 * ipoib_match_gid_pkey_addr - returns the number of IPoIB netdevs on
 * top of a given ipoib device matching a pkey_index and address, if one
 * exists.
 *
 * @found_net_dev: contains a matching net_device if the return value
 * >= 1, with a reference held.
 */
static int
ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
    const union ib_gid *gid, u16 pkey_index, const struct sockaddr *addr,
    struct net_device **found_net_dev)
{
	struct ipoib_dev_priv *child_priv;
	int matches = 0;

	if (priv->pkey_index == pkey_index &&
	    (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
		if (addr == NULL || ipoib_match_dev_addr(addr, priv->dev) != 0) {
			if (*found_net_dev == NULL) {
				struct net_device *net_dev;

				if (priv->parent != NULL)
					net_dev = priv->parent;
				else
					net_dev = priv->dev;
				*found_net_dev = net_dev;
				dev_hold(net_dev);
			}
			matches++;
		}
	}

	/* Check child interfaces */
	mutex_lock(&priv->vlan_mutex);
	list_for_each_entry(child_priv, &priv->child_intfs, list) {
		matches += ipoib_match_gid_pkey_addr(child_priv, gid,
		    pkey_index, addr, found_net_dev);
		if (matches > 1)
			break;
	}
	mutex_unlock(&priv->vlan_mutex);

	return matches;
}

/*
 * __ipoib_get_net_dev_by_params - returns the number of matching
 * net_devs found (between 0 and 2). Also return the matching
 * net_device in the @net_dev parameter, holding a reference to the
 * net_device, if the number of matches >= 1
 */
static int
__ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
    u16 pkey_index, const union ib_gid *gid,
    const struct sockaddr *addr, struct net_device **net_dev)
{
	struct ipoib_dev_priv *priv;
	int matches = 0;

	*net_dev = NULL;

	list_for_each_entry(priv, dev_list, list) {
		if (priv->port != port)
			continue;

		matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
		    addr, net_dev);

		if (matches > 1)
			break;
	}

	return matches;
}

static struct net_device *
ipoib_get_net_dev_by_params(struct ib_device *dev, u8 port, u16 pkey,
    const union ib_gid *gid, const struct sockaddr *addr, void *client_data)
{
	struct net_device *net_dev;
	struct list_head *dev_list = client_data;
	u16 pkey_index;
	int matches;
	int ret;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
	if (ret)
		return NULL;

	if (!dev_list)
		return NULL;

	/* See if we can find a unique device matching the L2 parameters */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
	    gid, NULL, &net_dev);

	switch (matches) {
	case 0:
		return NULL;
	case 1:
		return net_dev;
	}

	dev_put(net_dev);

	/* Couldn't find a unique device with L2 parameters only. Use L3
	 * address to uniquely match the net device */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
	    gid, addr, &net_dev);
	switch (matches) {
	case 0:
		return NULL;
	default:
		dev_warn_ratelimited(&dev->dev,
		    "duplicate IP address detected\n");
		/* Fall through */
	case 1:
		return net_dev;
	}
}

/*
 * vlan(4) configuration hook: create a child P_Key subinterface on an
 * InfiniBand parent, mapping the 15-bit vlan tag to a full-membership
 * P_Key (vtag | 0x8000).
 */
static void
ipoib_config_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct ipoib_dev_priv *parent;
	struct ipoib_dev_priv *priv;
	struct epoch_tracker et;
	struct ifnet *dev;
	uint16_t pkey;
	int error;

	if (ifp->if_type != IFT_INFINIBAND)
		return;
	NET_EPOCH_ENTER(et);
	dev = VLAN_DEVAT(ifp, vtag);
	NET_EPOCH_EXIT(et);
	if (dev == NULL)
		return;
	priv = NULL;
	error = 0;
	parent = ifp->if_softc;
	/* We only support 15 bits of pkey. */
	if (vtag & 0x8000)
		return;
	pkey = vtag | 0x8000;	/* Set full membership bit. */
	if (pkey == parent->pkey)
		return;
	/* Check for dups */
	mutex_lock(&parent->vlan_mutex);
	list_for_each_entry(priv, &parent->child_intfs, list) {
		if (priv->pkey == pkey) {
			priv = NULL;
			error = EBUSY;
			goto out;
		}
	}
	priv = ipoib_priv_alloc();
	priv->dev = dev;
	priv->max_ib_mtu = parent->max_ib_mtu;
	priv->mcast_mtu = priv->admin_mtu = parent->dev->if_mtu;
	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
	error = ipoib_set_dev_features(priv, parent->ca);
	if (error)
		goto out;
	priv->pkey = pkey;
	priv->broadcastaddr[8] = pkey >> 8;
	priv->broadcastaddr[9] = pkey & 0xff;
	dev->if_broadcastaddr = priv->broadcastaddr;
	error = ipoib_dev_init(priv, parent->ca, parent->port);
	if (error)
		goto out;
	priv->parent = parent->dev;
	list_add_tail(&priv->list, &parent->child_intfs);
	VLAN_SETCOOKIE(dev, priv);
	dev->if_start = ipoib_vlan_start;
	dev->if_drv_flags &= ~IFF_DRV_RUNNING;
	dev->if_hdrlen = IPOIB_HEADER_LEN;
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ipoib_open(priv);
	mutex_unlock(&parent->vlan_mutex);
	return;
out:
	mutex_unlock(&parent->vlan_mutex);
	if (priv)
		free(priv, M_TEMP);
	if (error)
		ipoib_warn(parent,
		    "failed to initialize subinterface: device %s, port %d vtag 0x%X",
		    parent->ca->name, parent->port, vtag);
	return;
}

static void
ipoib_unconfig_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct ipoib_dev_priv *parent;
	struct ipoib_dev_priv *priv;
	struct epoch_tracker et;
	struct ifnet *dev;
	uint16_t pkey;

	if (ifp->if_type != IFT_INFINIBAND)
		return;

	NET_EPOCH_ENTER(et);
	dev = VLAN_DEVAT(ifp, vtag);
	NET_EPOCH_EXIT(et);
	if (dev)
		VLAN_SETCOOKIE(dev, NULL);
	pkey = vtag | 0x8000;
	parent = ifp->if_softc;
	mutex_lock(&parent->vlan_mutex);
	list_for_each_entry(priv, &parent->child_intfs, list) {
		if (priv->pkey == pkey) {
			ipoib_dev_cleanup(priv);
			list_del(&priv->list);
			break;
		}
	}
	mutex_unlock(&parent->vlan_mutex);
}

eventhandler_tag ipoib_vlan_attach;
eventhandler_tag ipoib_vlan_detach;

static int __init
ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
	    IPOIB_MIN_QUEUE_SIZE));
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	ipoib_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ipoib_config_vlan, NULL, EVENTHANDLER_PRI_FIRST);
	ipoib_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ipoib_unconfig_vlan, NULL, EVENTHANDLER_PRI_FIRST);

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	return 0;

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	return ret;
}

static void __exit
ipoib_cleanup_module(void)
{

	EVENTHANDLER_DEREGISTER(vlan_config, ipoib_vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, ipoib_vlan_detach);
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);
}

module_init_order(ipoib_init_module, SI_ORDER_FIFTH);
module_exit_order(ipoib_cleanup_module, SI_ORDER_FIFTH);

static int
ipoib_evhand(module_t mod, int event, void *arg)
{
	return (0);
}

static moduledata_t ipoib_mod = {
	.name = "ipoib",
	.evhand = ipoib_evhand,
};

DECLARE_MODULE(ipoib, ipoib_mod, SI_SUB_LAST, SI_ORDER_ANY);
MODULE_DEPEND(ipoib, ibcore, 1, 1, 1);
MODULE_DEPEND(ipoib, if_infiniband, 1, 1, 1);
MODULE_DEPEND(ipoib, linuxkpi, 1, 1, 1);
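
/*
 * Usage sketch (illustrative only, not part of the driver): once ibcore
 * and an HCA driver are loaded, each IB port attaches as an ib<N>
 * interface, and child P_Key subinterfaces are created through the
 * vlan(4) configuration path registered above, with the 15-bit vtag
 * mapped to a full-membership P_Key (vtag | 0x8000).  The ifconfig
 * spelling below assumes standard vlan(4) dot-notation cloning:
 *
 *	kldload ipoib
 *	ifconfig ib0 inet 10.0.0.1/24 up
 *	ifconfig ib0.100 create		# vtag 100 -> P_Key 0x8064
 */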