/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

static int ipoib_resolvemulti(struct ifnet *, struct sockaddr **,
    struct sockaddr *);

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level = 1;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct ipoib_dev_priv *priv;
	struct ipoib_path path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_start(struct ifnet *dev);
static int ipoib_output(struct ifnet *ifp, struct mbuf *m,
    const struct sockaddr *dst, struct route *ro);
static int ipoib_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void ipoib_input(struct ifnet *ifp, struct mbuf *m);
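/*
 * Tap an mbuf that still carries its ipoib_header to any attached BPF
 * listeners; the bpf_peers_present() check keeps the common
 * no-listener case down to a single branch.
 */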
#define	IPOIB_MTAP(_ifp, _m)					\
do {								\
	if (bpf_peers_present((_ifp)->if_bpf)) {		\
		M_ASSERTVALID(_m);				\
		ipoib_mtap_mb((_ifp), (_m));			\
	}							\
} while (0)

static struct unrhdr *ipoib_unrhdr;

static void
ipoib_unrhdr_init(void *arg)
{

	ipoib_unrhdr = new_unrhdr(0, 65535, NULL);
}
SYSINIT(ipoib_unrhdr_init, SI_SUB_KLD - 1, SI_ORDER_ANY, ipoib_unrhdr_init, NULL);

static void
ipoib_unrhdr_uninit(void *arg)
{

	if (ipoib_unrhdr != NULL) {
		struct unrhdr *hdr;

		hdr = ipoib_unrhdr;
		ipoib_unrhdr = NULL;

		delete_unrhdr(hdr);
	}
}
SYSUNINIT(ipoib_unrhdr_uninit, SI_SUB_KLD - 1, SI_ORDER_ANY, ipoib_unrhdr_uninit, NULL);

/*
 * This is for clients that have an ipoib_header in the mbuf.
 */
static void
ipoib_mtap_mb(struct ifnet *ifp, struct mbuf *mb)
{
	struct ipoib_header *ih;
	struct ether_header eh;

	ih = mtod(mb, struct ipoib_header *);
	eh.ether_type = ih->proto;
	bcopy(ih->hwaddr, &eh.ether_dhost, ETHER_ADDR_LEN);
	bzero(&eh.ether_shost, ETHER_ADDR_LEN);
	mb->m_data += sizeof(struct ipoib_header);
	mb->m_len -= sizeof(struct ipoib_header);
	bpf_mtap2(ifp->if_bpf, &eh, sizeof(eh), mb);
	mb->m_data -= sizeof(struct ipoib_header);
	mb->m_len += sizeof(struct ipoib_header);
}

void
ipoib_mtap_proto(struct ifnet *ifp, struct mbuf *mb, uint16_t proto)
{
	struct ether_header eh;

	eh.ether_type = proto;
	bzero(&eh.ether_shost, ETHER_ADDR_LEN);
	bzero(&eh.ether_dhost, ETHER_ADDR_LEN);
	bpf_mtap2(ifp->if_bpf, &eh, sizeof(eh), mb);
}

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};

int
ipoib_open(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;

	ipoib_dbg(priv, "bringing up interface\n");

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(priv))
		return 0;

	if (ipoib_ib_dev_open(priv))
		goto err_disable;

	if (ipoib_ib_dev_up(priv))
		goto err_stop;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list)
			if ((cpriv->dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ipoib_open(cpriv);
		mutex_unlock(&priv->vlan_mutex);
	}
	dev->if_drv_flags |= IFF_DRV_RUNNING;
	dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return 0;

err_stop:
	ipoib_ib_dev_stop(priv, 1);

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}
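/*
 * if_init handler: bring the interface up if it is not already running
 * and queue a light flush so the IB state is re-synchronized afterwards.
 */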
static void
ipoib_init(void *arg)
{
	struct ifnet *dev;
	struct ipoib_dev_priv *priv;

	priv = arg;
	dev = priv->dev;
	if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
		ipoib_open(priv);
	queue_work(ipoib_workqueue, &priv->flush_light);
}

static int
ipoib_stop(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ipoib_ib_dev_down(priv, 0);
	ipoib_ib_dev_stop(priv, 0);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list)
			if ((cpriv->dev->if_drv_flags & IFF_DRV_RUNNING) != 0)
				ipoib_stop(cpriv);
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}

int
ipoib_change_mtu(struct ipoib_dev_priv *priv, int new_mtu)
{
	struct ifnet *dev = priv->dev;

	/* dev->if_mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(priv)) {
		if (new_mtu > IPOIB_CM_MTU(ipoib_cm_max_mtu(priv)))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
			    priv->mcast_mtu);

		dev->if_mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->if_mtu = min(priv->mcast_mtu, priv->admin_mtu);

	queue_work(ipoib_workqueue, &priv->flush_light);

	return 0;
}
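/*
 * Interface ioctl handler.  Only the flag, address, multicast and MTU
 * requests that make sense for IPoIB are handled; everything else gets
 * EINVAL.
 */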
static int
ipoib_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ipoib_dev_priv *priv = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;

	/* check if detaching */
	if (priv == NULL || priv->gone != 0)
		return (ENXIO);

	switch (command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				error = -ipoib_open(priv);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ipoib_stop(priv);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			queue_work(ipoib_workqueue, &priv->restart_task);
		break;
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ifp->if_init(ifp->if_softc);	/* before arpwhohas */
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			ifp->if_init(ifp->if_softc);
			break;
		}
		break;

	case SIOCGIFADDR:
		{
			struct sockaddr *sa;

			sa = (struct sockaddr *)&ifr->ifr_data;
			bcopy(IF_LLADDR(ifp),
			    (caddr_t)sa->sa_data, INFINIBAND_ALEN);
		}
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		error = -ipoib_change_mtu(priv, ifr->ifr_mtu);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static struct ipoib_path *
__path_find(struct ipoib_dev_priv *priv, void *gid)
{
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
		    sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int
__path_add(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
		    sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

void
ipoib_path_free(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{

	_IF_DRAIN(&path->queue);

	if (path->ah)
		ipoib_put_ah(path->ah);
	if (ipoib_cm_get(path))
		ipoib_cm_destroy_tx(ipoib_cm_get(path));

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *
ipoib_path_iter_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->priv = priv;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int
ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = iter->priv;
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
		    sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void
ipoib_path_iter_read(struct ipoib_path_iter *iter, struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
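/*
 * Path table teardown helpers.  Entries in priv->path_tree are keyed
 * by destination GID (see __path_find()/__path_add() above); the two
 * functions below invalidate or destroy the whole set, e.g. when the
 * device is flushed.
 */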
void
ipoib_mark_paths_invalid(struct ipoib_dev_priv *priv)
{
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %16D invalid\n",
		    be16_to_cpu(path->pathrec.dlid),
		    path->pathrec.dgid.raw, ":");
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}

void
ipoib_flush_paths(struct ipoib_dev_priv *priv)
{
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		wait_for_completion(&path->done);
		ipoib_path_free(priv, path);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void
path_rec_completion(int status, struct ib_sa_path_rec *pathrec, void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct ipoib_dev_priv *priv = path->priv;
	struct ifnet *dev = priv->dev;
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ifqueue mbqueue;
	struct mbuf *mb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %16D\n",
		    be16_to_cpu(pathrec->dlid), pathrec->dgid.raw, ":");
	else
		ipoib_dbg(priv, "PathRec status %d for GID %16D\n",
		    status, path->pathrec.dgid.raw, ":");

	bzero(&mbqueue, sizeof(mbqueue));

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(priv, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (ah) {
		path->pathrec = *pathrec;

		old_ah = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
		    ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		for (;;) {
			_IF_DEQUEUE(&path->queue, mb);
			if (mb == NULL)
				break;
			_IF_ENQUEUE(&mbqueue, mb);
		}

#ifdef CONFIG_INFINIBAND_IPOIB_CM
		if (ipoib_cm_enabled(priv, path->hwaddr) && !ipoib_cm_get(path))
			ipoib_cm_set(path, ipoib_cm_create_tx(priv, path));
#endif

		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (old_ah)
		ipoib_put_ah(old_ah);

	for (;;) {
		_IF_DEQUEUE(&mbqueue, mb);
		if (mb == NULL)
			break;
		mb->m_pkthdr.rcvif = dev;
		if (dev->if_transmit(dev, mb))
			ipoib_warn(priv, "dev_queue_xmit failed "
			    "to requeue packet\n");
	}
}

static struct ipoib_path *
path_rec_create(struct ipoib_dev_priv *priv, uint8_t *hwaddr)
{
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->priv = priv;

	bzero(&path->queue, sizeof(path->queue));

#ifdef CONFIG_INFINIBAND_IPOIB_CM
	memcpy(&path->hwaddr, hwaddr, INFINIBAND_ALEN);
#endif
	memcpy(path->pathrec.dgid.raw, &hwaddr[4], sizeof (union ib_gid));
	path->pathrec.sgid = priv->local_gid;
	path->pathrec.pkey = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}
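/*
 * Kick off an SA path record query for this path.  The IB_SA_GT MTU
 * selector asks the SM for a path whose MTU is strictly greater than
 * the enum below it, i.e. large enough to carry the interface MTU plus
 * the 4-byte encapsulation header in a single datagram.
 */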
static int
path_rec_start(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{
	struct ifnet *dev = priv->dev;

	ib_sa_comp_mask comp_mask = IB_SA_PATH_REC_MTU_SELECTOR | IB_SA_PATH_REC_MTU;
	struct ib_sa_path_rec p_rec;

	p_rec = path->pathrec;
	p_rec.mtu_selector = IB_SA_GT;

	switch (roundup_pow_of_two(dev->if_mtu + IPOIB_ENCAP_LEN)) {
	case 512:
		p_rec.mtu = IB_MTU_256;
		break;
	case 1024:
		p_rec.mtu = IB_MTU_512;
		break;
	case 2048:
		p_rec.mtu = IB_MTU_1024;
		break;
	case 4096:
		p_rec.mtu = IB_MTU_2048;
		break;
	default:
		/* Wildcard everything */
		comp_mask = 0;
		p_rec.mtu = 0;
		p_rec.mtu_selector = 0;
	}

	ipoib_dbg(priv, "Start path record lookup for %16D MTU > %d\n",
	    p_rec.dgid.raw, ":",
	    comp_mask ? ib_mtu_enum_to_int(p_rec.mtu) : 0);

	init_completion(&path->done);

	path->query_id =
	    ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
		&p_rec, comp_mask |
		IB_SA_PATH_REC_DGID |
		IB_SA_PATH_REC_SGID |
		IB_SA_PATH_REC_NUMB_PATH |
		IB_SA_PATH_REC_TRAFFIC_CLASS |
		IB_SA_PATH_REC_PKEY,
		1000, GFP_ATOMIC,
		path_rec_completion,
		path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}

static void
ipoib_unicast_send(struct mbuf *mb, struct ipoib_dev_priv *priv, struct ipoib_header *eh)
{
	struct ipoib_path *path;

	path = __path_find(priv, eh->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(priv, eh->hwaddr);
			new_path = 1;
		}
		if (path) {
			if (_IF_QLEN(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE)
				_IF_ENQUEUE(&path->queue, mb);
			else {
				if_inc_counter(priv->dev, IFCOUNTER_OERRORS, 1);
				m_freem(mb);
			}

			if (!path->query && path_rec_start(priv, path)) {
				/*
				 * The caller (_ipoib_start) holds priv->lock
				 * and releases it after we return; do not
				 * drop it here.
				 */
				if (new_path)
					ipoib_path_free(priv, path);
				return;
			} else
				__path_add(priv, path);
		} else {
			if_inc_counter(priv->dev, IFCOUNTER_OERRORS, 1);
			m_freem(mb);
		}

		return;
	}

	if (ipoib_cm_get(path) && ipoib_cm_up(path)) {
		ipoib_cm_send(priv, mb, ipoib_cm_get(path));
	} else if (path->ah) {
		ipoib_send(priv, mb, path->ah, IPOIB_QPN(eh->hwaddr));
	} else if ((path->query || !path_rec_start(priv, path)) &&
	    path->queue.ifq_len < IPOIB_MAX_PATH_REC_QUEUE) {
		_IF_ENQUEUE(&path->queue, mb);
	} else {
		if_inc_counter(priv->dev, IFCOUNTER_OERRORS, 1);
		m_freem(mb);
	}
}

static int
ipoib_send_one(struct ipoib_dev_priv *priv, struct mbuf *mb)
{
	struct ipoib_header *eh;

	eh = mtod(mb, struct ipoib_header *);
	if (IPOIB_IS_MULTICAST(eh->hwaddr)) {
		/* Add in the P_Key for multicast */
		eh->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		eh->hwaddr[9] = priv->pkey & 0xff;

		ipoib_mcast_send(priv, eh->hwaddr + 4, mb);
	} else
		ipoib_unicast_send(mb, priv, eh);

	return 0;
}

static void
_ipoib_start(struct ifnet *dev, struct ipoib_dev_priv *priv)
{
	struct mbuf *mb;

	if ((dev->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	spin_lock(&priv->lock);
	while (!IFQ_DRV_IS_EMPTY(&dev->if_snd) &&
	    (dev->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
		IFQ_DRV_DEQUEUE(&dev->if_snd, mb);
		if (mb == NULL)
			break;
		IPOIB_MTAP(dev, mb);
		ipoib_send_one(priv, mb);
	}
	spin_unlock(&priv->lock);
}

static void
ipoib_start(struct ifnet *dev)
{
	_ipoib_start(dev, dev->if_softc);
}
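/*
 * if_start handler for vlan child interfaces.  If the vlan cookie no
 * longer points at an ipoib softc, drain the send queue and count the
 * packets as output errors instead of transmitting them.
 */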
static void
ipoib_vlan_start(struct ifnet *dev)
{
	struct ipoib_dev_priv *priv;
	struct mbuf *mb;

	priv = VLAN_COOKIE(dev);
	if (priv != NULL)
		return _ipoib_start(dev, priv);
	while (!IFQ_DRV_IS_EMPTY(&dev->if_snd)) {
		IFQ_DRV_DEQUEUE(&dev->if_snd, mb);
		if (mb == NULL)
			break;
		m_freem(mb);
		if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
	}
}

int
ipoib_dev_init(struct ipoib_dev_priv *priv, struct ib_device *ca, int port)
{

	/* Allocate RX/TX "rings" to hold queued mbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
	    GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		    ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring, GFP_KERNEL);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		    ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}
	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(priv, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	kfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

static void
ipoib_detach(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev;

	dev = priv->dev;
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		priv->gone = 1;
		bpfdetach(dev);
		if_detach(dev);
		if_free(dev);
		free_unr(ipoib_unrhdr, priv->unit);
	} else
		VLAN_SETCOOKIE(priv->dev, NULL);

	free(priv, M_TEMP);
}

void
ipoib_dev_cleanup(struct ipoib_dev_priv *priv)
{
	struct ipoib_dev_priv *cpriv, *tcpriv;

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		ipoib_dev_cleanup(cpriv);
		ipoib_detach(cpriv);
	}

	ipoib_ib_dev_cleanup(priv);

	kfree(priv->rx_ring);
	kfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static struct ipoib_dev_priv *
ipoib_priv_alloc(void)
{
	struct ipoib_dev_priv *priv;

	priv = malloc(sizeof(struct ipoib_dev_priv), M_TEMP, M_ZERO|M_WAITOK);
	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->drain_lock);
	mutex_init(&priv->vlan_mutex);
	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);
	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	memcpy(priv->broadcastaddr, ipv4_bcast_addr, INFINIBAND_ALEN);

	return (priv);
}
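/*
 * Allocate and attach an ipoib ifnet.  Unit numbers come from the
 * global ipoib_unrhdr allocator, so parent interfaces get stable
 * "<name><unit>" names (e.g. ib0).
 */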
struct ipoib_dev_priv *
ipoib_intf_alloc(const char *name)
{
	struct ipoib_dev_priv *priv;
	struct sockaddr_dl *sdl;
	struct ifnet *dev;

	priv = ipoib_priv_alloc();
	dev = priv->dev = if_alloc(IFT_INFINIBAND);
	if (!dev) {
		free(priv, M_TEMP);
		return NULL;
	}
	dev->if_softc = priv;
	priv->unit = alloc_unr(ipoib_unrhdr);
	if (priv->unit == -1) {
		if_free(dev);
		free(priv, M_TEMP);
		return NULL;
	}
	if_initname(dev, name, priv->unit);
	dev->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	dev->if_addrlen = INFINIBAND_ALEN;
	dev->if_hdrlen = IPOIB_HEADER_LEN;
	if_attach(dev);
	dev->if_init = ipoib_init;
	dev->if_ioctl = ipoib_ioctl;
	dev->if_start = ipoib_start;
	dev->if_output = ipoib_output;
	dev->if_input = ipoib_input;
	dev->if_resolvemulti = ipoib_resolvemulti;
	dev->if_baudrate = IF_Gbps(10);
	dev->if_broadcastaddr = priv->broadcastaddr;
	dev->if_snd.ifq_maxlen = ipoib_sendq_size * 2;
	sdl = (struct sockaddr_dl *)dev->if_addr->ifa_addr;
	sdl->sdl_type = IFT_INFINIBAND;
	sdl->sdl_alen = dev->if_addrlen;
	priv->dev = dev;
	if_link_state_change(dev, LINK_STATE_DOWN);
	bpfattach(dev, DLT_EN10MB, ETHER_HDR_LEN);

	return dev->if_softc;
}

int
ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	struct ib_device_attr *device_attr = &hca->attrs;

	priv->hca_caps = device_attr->device_cap_flags;

	priv->dev->if_hwassist = 0;
	priv->dev->if_capabilities = 0;

#ifndef CONFIG_INFINIBAND_IPOIB_CM
	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
		priv->dev->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
		priv->dev->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
	}

#if 0
	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO) {
		priv->dev->if_capabilities |= IFCAP_TSO4;
		priv->dev->if_hwassist |= CSUM_TSO;
	}
#endif
#endif
	priv->dev->if_capabilities |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_LINKSTATE;
	priv->dev->if_capenable = priv->dev->if_capabilities;

	return 0;
}
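/*
 * Create an ipoib interface for one HCA port: query the port MTU,
 * P_Key and GID, initialize the rings, and register for asynchronous
 * port events.
 */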
static struct ifnet *
ipoib_add_port(const char *format, struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	if (!ib_query_port(hca, port, &attr))
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		    hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->if_mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->if_mtu;

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		    hca->name, port, result);
		goto device_init_failed;
	}

	if (ipoib_set_dev_features(priv, hca))
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->broadcastaddr[8] = priv->pkey >> 8;
	priv->broadcastaddr[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		    hca->name, port, result);
		goto device_init_failed;
	}
	memcpy(IF_LLADDR(priv->dev) + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		    hca->name, port, result);
		goto device_init_failed;
	}
	if (ipoib_cm_admin_enabled(priv))
		priv->dev->if_mtu = IPOIB_CM_MTU(ipoib_cm_max_mtu(priv));

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
	    priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		    "port %d (ret = %d)\n",
		    hca->name, port, result);
		goto event_failed;
	}
	if_printf(priv->dev, "Attached to %s port %d\n", hca->name, port);

	return priv->dev;

event_failed:
	ipoib_dev_cleanup(priv);

device_init_failed:
	ipoib_detach(priv);

alloc_mem_failed:
	return ERR_PTR(result);
}

static void
ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct ifnet *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
			continue;
		dev = ipoib_add_port("ib", device, p);
		if (!IS_ERR(dev)) {
			priv = dev->if_softc;
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

static void
ipoib_remove_one(struct ib_device *device, void *client_data)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list = client_data;

	if (!dev_list)
		return;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		if (rdma_port_get_link_layer(device, priv->port) != IB_LINK_LAYER_INFINIBAND)
			continue;

		ipoib_stop(priv);

		ib_unregister_event_handler(&priv->event_handler);

		/* dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); */

		flush_workqueue(ipoib_workqueue);

		ipoib_dev_cleanup(priv);
		ipoib_detach(priv);
	}

	kfree(dev_list);
}
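/*
 * VLAN glue: child IPoIB interfaces are created through the vlan(4)
 * config eventhandlers, with the 15-bit P_Key carried in the vlan tag
 * and the full membership bit forced on before use.
 */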
static void
ipoib_config_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct ipoib_dev_priv *parent;
	struct ipoib_dev_priv *priv;
	struct ifnet *dev;
	uint16_t pkey;
	int error;

	if (ifp->if_type != IFT_INFINIBAND)
		return;
	dev = VLAN_DEVAT(ifp, vtag);
	if (dev == NULL)
		return;
	priv = NULL;
	error = 0;
	parent = ifp->if_softc;
	/* We only support 15 bits of pkey. */
	if (vtag & 0x8000)
		return;
	pkey = vtag | 0x8000;	/* Set full membership bit. */
	if (pkey == parent->pkey)
		return;
	/* Check for dups */
	mutex_lock(&parent->vlan_mutex);
	list_for_each_entry(priv, &parent->child_intfs, list) {
		if (priv->pkey == pkey) {
			priv = NULL;
			error = EBUSY;
			goto out;
		}
	}
	priv = ipoib_priv_alloc();
	priv->dev = dev;
	priv->max_ib_mtu = parent->max_ib_mtu;
	priv->mcast_mtu = priv->admin_mtu = parent->dev->if_mtu;
	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
	error = ipoib_set_dev_features(priv, parent->ca);
	if (error)
		goto out;
	priv->pkey = pkey;
	priv->broadcastaddr[8] = pkey >> 8;
	priv->broadcastaddr[9] = pkey & 0xff;
	dev->if_broadcastaddr = priv->broadcastaddr;
	error = ipoib_dev_init(priv, parent->ca, parent->port);
	if (error)
		goto out;
	priv->parent = parent->dev;
	list_add_tail(&priv->list, &parent->child_intfs);
	VLAN_SETCOOKIE(dev, priv);
	dev->if_start = ipoib_vlan_start;
	dev->if_drv_flags &= ~IFF_DRV_RUNNING;
	dev->if_hdrlen = IPOIB_HEADER_LEN;
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ipoib_open(priv);
	mutex_unlock(&parent->vlan_mutex);
	return;
out:
	mutex_unlock(&parent->vlan_mutex);
	if (priv)
		free(priv, M_TEMP);
	if (error)
		ipoib_warn(parent,
		    "failed to initialize subinterface: device %s, port %d vtag 0x%X",
		    parent->ca->name, parent->port, vtag);
	return;
}

static void
ipoib_unconfig_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct ipoib_dev_priv *parent;
	struct ipoib_dev_priv *priv;
	struct ifnet *dev;
	uint16_t pkey;

	if (ifp->if_type != IFT_INFINIBAND)
		return;

	dev = VLAN_DEVAT(ifp, vtag);
	if (dev)
		VLAN_SETCOOKIE(dev, NULL);
	pkey = vtag | 0x8000;
	parent = ifp->if_softc;
	mutex_lock(&parent->vlan_mutex);
	list_for_each_entry(priv, &parent->child_intfs, list) {
		if (priv->pkey == pkey) {
			ipoib_dev_cleanup(priv);
			list_del(&priv->list);
			break;
		}
	}
	mutex_unlock(&parent->vlan_mutex);
}

eventhandler_tag ipoib_vlan_attach;
eventhandler_tag ipoib_vlan_detach;
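/*
 * Module load: clamp the ring sizes to supported powers of two, hook
 * the vlan eventhandlers, and register with the SA and the IB device
 * layer.
 */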
static int __init
ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
	    IPOIB_MIN_QUEUE_SIZE));
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	ipoib_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ipoib_config_vlan, NULL, EVENTHANDLER_PRI_FIRST);
	ipoib_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ipoib_unconfig_vlan, NULL, EVENTHANDLER_PRI_FIRST);

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	return 0;

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	return ret;
}

static void __exit
ipoib_cleanup_module(void)
{

	EVENTHANDLER_DEREGISTER(vlan_config, ipoib_vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, ipoib_vlan_detach);
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);
}
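/*
 * The output path below resolves the destination to a 20-byte IPoIB
 * link address (4-byte QPN field plus 16-byte port GID): via ARP/ND6
 * for unicast, or the ip{,v6}_ib_mc_map() mapping for multicast.
 */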
/*
 * Infiniband output routine.
 */
static int
ipoib_output(struct ifnet *ifp, struct mbuf *m,
    const struct sockaddr *dst, struct route *ro)
{
	u_char edst[INFINIBAND_ALEN];
#if defined(INET) || defined(INET6)
	struct llentry *lle = NULL;
#endif
	struct ipoib_header *eh;
	int error = 0, is_gw = 0;
	short type;

	if (ro != NULL)
		is_gw = (ro->ro_flags & RT_HAS_GW) != 0;
#ifdef MAC
	error = mac_ifnet_check_transmit(ifp, m);
	if (error)
		goto bad;
#endif

	M_PROFILE(m);
	if (ifp->if_flags & IFF_MONITOR) {
		error = ENETDOWN;
		goto bad;
	}
	if (!((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		error = ENETDOWN;
		goto bad;
	}

	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		if (lle != NULL && (lle->la_flags & LLE_VALID))
			memcpy(edst, lle->ll_addr, sizeof(edst));
		else if (m->m_flags & M_MCAST)
			ip_ib_mc_map(((struct sockaddr_in *)dst)->sin_addr.s_addr, ifp->if_broadcastaddr, edst);
		else
			error = arpresolve(ifp, is_gw, m, dst, edst, NULL, NULL);
		if (error)
			return (error == EWOULDBLOCK ? 0 : error);
		type = htons(ETHERTYPE_IP);
		break;
	case AF_ARP:
	{
		struct arphdr *ah;
		ah = mtod(m, struct arphdr *);
		ah->ar_hrd = htons(ARPHRD_INFINIBAND);

		switch (ntohs(ah->ar_op)) {
		case ARPOP_REVREQUEST:
		case ARPOP_REVREPLY:
			type = htons(ETHERTYPE_REVARP);
			break;
		case ARPOP_REQUEST:
		case ARPOP_REPLY:
		default:
			type = htons(ETHERTYPE_ARP);
			break;
		}

		if (m->m_flags & M_BCAST)
			bcopy(ifp->if_broadcastaddr, edst, INFINIBAND_ALEN);
		else
			bcopy(ar_tha(ah), edst, INFINIBAND_ALEN);

	}
	break;
#endif
#ifdef INET6
	case AF_INET6:
		if (lle != NULL && (lle->la_flags & LLE_VALID))
			memcpy(edst, lle->ll_addr, sizeof(edst));
		else if (m->m_flags & M_MCAST)
			ipv6_ib_mc_map(&((struct sockaddr_in6 *)dst)->sin6_addr, ifp->if_broadcastaddr, edst);
		else
			error = nd6_resolve(ifp, is_gw, m, dst, edst, NULL, NULL);
		if (error)
			return error;
		type = htons(ETHERTYPE_IPV6);
		break;
#endif

	default:
		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/*
	 * Add local net header.  If no space in first mbuf,
	 * allocate another.
	 */
	M_PREPEND(m, IPOIB_HEADER_LEN, M_NOWAIT);
	if (m == NULL) {
		error = ENOBUFS;
		goto bad;
	}
	eh = mtod(m, struct ipoib_header *);
	(void)memcpy(&eh->proto, &type, sizeof(eh->proto));
	(void)memcpy(&eh->hwaddr, edst, sizeof (edst));

	/*
	 * Queue message on interface, update output statistics if
	 * successful, and start output if interface not yet active.
	 */
	return ((ifp->if_transmit)(ifp, m));
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}

/*
 * Upper layer processing for a received Infiniband packet.
 */
void
ipoib_demux(struct ifnet *ifp, struct mbuf *m, u_short proto)
{
	int isr;

#ifdef MAC
	/*
	 * Tag the mbuf with an appropriate MAC label before any other
	 * consumers can get to it.
	 */
	mac_ifnet_create_mbuf(ifp, m);
#endif
	/* Allow monitor mode to claim this frame, after stats are updated. */
	if (ifp->if_flags & IFF_MONITOR) {
		if_printf(ifp, "discard frame at IFF_MONITOR\n");
		m_freem(m);
		return;
	}
	/*
	 * Dispatch frame to upper layer.
	 */
	switch (proto) {
#ifdef INET
	case ETHERTYPE_IP:
		isr = NETISR_IP;
		break;

	case ETHERTYPE_ARP:
		if (ifp->if_flags & IFF_NOARP) {
			/* Discard packet if ARP is disabled on interface */
			m_freem(m);
			return;
		}
		isr = NETISR_ARP;
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		isr = NETISR_IPV6;
		break;
#endif
	default:
		goto discard;
	}
	netisr_dispatch(isr, m);
	return;

discard:
	m_freem(m);
}

/*
 * Process a received Infiniband packet.
 */
static void
ipoib_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ipoib_header *eh;

	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		return;
	}
	CURVNET_SET_QUIET(ifp->if_vnet);

	/* Let BPF have it before we strip the header. */
	IPOIB_MTAP(ifp, m);
	eh = mtod(m, struct ipoib_header *);
	/*
	 * Reset layer specific mbuf flags to avoid confusing upper layers.
	 * Strip off Infiniband header.
	 */
	m->m_flags &= ~M_VLANTAG;
	m_clrprotoflags(m);
	m_adj(m, IPOIB_HEADER_LEN);

	if (IPOIB_IS_MULTICAST(eh->hwaddr)) {
		if (memcmp(eh->hwaddr, ifp->if_broadcastaddr,
		    ifp->if_addrlen) == 0)
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;
		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
	}

	ipoib_demux(ifp, m, ntohs(eh->proto));
	CURVNET_RESTORE();
}
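/*
 * if_resolvemulti handler: map an AF_INET/AF_INET6 multicast address
 * to the matching 20-byte IPoIB multicast link address, and validate
 * AF_LINK addresses that are already in link-layer form.
 */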
static int
ipoib_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
    struct sockaddr *sa)
{
	struct sockaddr_dl *sdl;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif
	u_char *e_addr;

	switch (sa->sa_family) {
	case AF_LINK:
		/*
		 * No mapping needed.  Just check that it's a valid MC address.
		 */
		sdl = (struct sockaddr_dl *)sa;
		e_addr = LLADDR(sdl);
		if (!IPOIB_IS_MULTICAST(e_addr))
			return EADDRNOTAVAIL;
		*llsa = NULL;
		return 0;

#ifdef INET
	case AF_INET:
		sin = (struct sockaddr_in *)sa;
		if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
			return EADDRNOTAVAIL;
		sdl = link_init_sdl(ifp, *llsa, IFT_INFINIBAND);
		sdl->sdl_alen = INFINIBAND_ALEN;
		e_addr = LLADDR(sdl);
		ip_ib_mc_map(sin->sin_addr.s_addr, ifp->if_broadcastaddr,
		    e_addr);
		*llsa = (struct sockaddr *)sdl;
		return 0;
#endif
#ifdef INET6
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *)sa;
		/*
		 * An IP6 address of 0 means listen to all
		 * of the multicast addresses used for IP6.
		 * This has no meaning in ipoib.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
			return EADDRNOTAVAIL;
		if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
			return EADDRNOTAVAIL;
		sdl = link_init_sdl(ifp, *llsa, IFT_INFINIBAND);
		sdl->sdl_alen = INFINIBAND_ALEN;
		e_addr = LLADDR(sdl);
		ipv6_ib_mc_map(&sin6->sin6_addr, ifp->if_broadcastaddr, e_addr);
		*llsa = (struct sockaddr *)sdl;
		return 0;
#endif

	default:
		return EAFNOSUPPORT;
	}
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);

static int
ipoib_evhand(module_t mod, int event, void *arg)
{
	return (0);
}

static moduledata_t ipoib_mod = {
	.name = "ipoib",
	.evhand = ipoib_evhand,
};

DECLARE_MODULE(ipoib, ipoib_mod, SI_SUB_LAST, SI_ORDER_ANY);
MODULE_DEPEND(ipoib, ibcore, 1, 1, 1);
MODULE_DEPEND(ipoib, linuxkpi, 1, 1, 1);