/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

static int ipoib_resolvemulti(struct ifnet *, struct sockaddr **,
	struct sockaddr *);

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level = 1;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct ipoib_dev_priv *priv;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);
static void ipoib_start(struct ifnet *dev);
static int ipoib_output(struct ifnet *ifp, struct mbuf *m,
	    const struct sockaddr *dst, struct route *ro);
static int ipoib_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void ipoib_input(struct ifnet *ifp, struct mbuf *m);
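
/*
 * BPF tap helpers.  An IPoIB frame carries a 20-byte hardware address
 * and a 2-byte protocol type instead of an Ethernet header, so a fake
 * Ethernet header is synthesized for every tapped frame; the interface
 * is bpfattach()ed as DLT_EN10MB (see ipoib_intf_alloc() below).
 */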
#define	IPOIB_MTAP(_ifp, _m)					\
do {								\
	if (bpf_peers_present((_ifp)->if_bpf)) {		\
		M_ASSERTVALID(_m);				\
		ipoib_mtap_mb((_ifp), (_m));			\
	}							\
} while (0)

/*
 * This is for clients that have an ipoib_header in the mbuf.
 */
static void
ipoib_mtap_mb(struct ifnet *ifp, struct mbuf *mb)
{
	struct ipoib_header *ih;
	struct ether_header eh;

	ih = mtod(mb, struct ipoib_header *);
	eh.ether_type = ih->proto;
	bcopy(ih->hwaddr, &eh.ether_dhost, ETHER_ADDR_LEN);
	bzero(&eh.ether_shost, ETHER_ADDR_LEN);
	mb->m_data += sizeof(struct ipoib_header);
	mb->m_len -= sizeof(struct ipoib_header);
	bpf_mtap2(ifp->if_bpf, &eh, sizeof(eh), mb);
	mb->m_data -= sizeof(struct ipoib_header);
	mb->m_len += sizeof(struct ipoib_header);
}

void
ipoib_mtap_proto(struct ifnet *ifp, struct mbuf *mb, uint16_t proto)
{
	struct ether_header eh;

	eh.ether_type = proto;
	bzero(&eh.ether_shost, ETHER_ADDR_LEN);
	bzero(&eh.ether_dhost, ETHER_ADDR_LEN);
	bpf_mtap2(ifp->if_bpf, &eh, sizeof(eh), mb);
}

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};

int
ipoib_open(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;

	ipoib_dbg(priv, "bringing up interface\n");

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(priv))
		return 0;

	if (ipoib_ib_dev_open(priv))
		goto err_disable;

	if (ipoib_ib_dev_up(priv))
		goto err_stop;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list)
			if ((cpriv->dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ipoib_open(cpriv);
		mutex_unlock(&priv->vlan_mutex);
	}
	dev->if_drv_flags |= IFF_DRV_RUNNING;
	dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return 0;

err_stop:
	ipoib_ib_dev_stop(priv, 1);

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}

static void
ipoib_init(void *arg)
{
	struct ifnet *dev;
	struct ipoib_dev_priv *priv;

	priv = arg;
	dev = priv->dev;
	if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
		ipoib_open(priv);
	queue_work(ipoib_workqueue, &priv->flush_light);
}

static int
ipoib_stop(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ipoib_ib_dev_down(priv, 0);
	ipoib_ib_dev_stop(priv, 0);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list)
			if ((cpriv->dev->if_drv_flags & IFF_DRV_RUNNING) != 0)
				ipoib_stop(cpriv);
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}
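
/*
 * The admissible MTU depends on the transport mode: connected (CM)
 * mode allows up to the CM payload size, while datagram (UD) mode is
 * bounded by the IB link MTU less the IPoIB encapsulation overhead.
 */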

int
ipoib_change_mtu(struct ipoib_dev_priv *priv, int new_mtu)
{
	struct ifnet *dev = priv->dev;

	/* dev->if_mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(priv)) {
		if (new_mtu > IPOIB_CM_MTU(ipoib_cm_max_mtu(priv)))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->if_mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->if_mtu = min(priv->mcast_mtu, priv->admin_mtu);

	queue_work(ipoib_workqueue, &priv->flush_light);

	return 0;
}

static int
ipoib_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ipoib_dev_priv *priv = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				error = -ipoib_open(priv);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ipoib_stop(priv);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			queue_work(ipoib_workqueue, &priv->restart_task);
		break;
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ifp->if_init(ifp->if_softc);	/* before arpwhohas */
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			ifp->if_init(ifp->if_softc);
			break;
		}
		break;

	case SIOCGIFADDR:
		{
			struct sockaddr *sa;

			sa = (struct sockaddr *) &ifr->ifr_data;
			bcopy(IF_LLADDR(ifp),
			      (caddr_t) sa->sa_data, INFINIBAND_ALEN);
		}
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		error = -ipoib_change_mtu(priv, ifr->ifr_mtu);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
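
/*
 * Resolved unicast paths live in a red-black tree keyed by the 16-byte
 * destination GID (bytes 4..19 of the hardware address), with a
 * parallel list used for iteration and bulk teardown.
 */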

static struct ipoib_path *
__path_find(struct ipoib_dev_priv *priv, void *gid)
{
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int
__path_add(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

void
ipoib_path_free(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{

	_IF_DRAIN(&path->queue);

	if (path->ah)
		ipoib_put_ah(path->ah);
	if (ipoib_cm_get(path))
		ipoib_cm_destroy_tx(ipoib_cm_get(path));

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *
ipoib_path_iter_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->priv = priv;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int
ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = iter->priv;
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void
ipoib_path_iter_read(struct ipoib_path_iter *iter, struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void
ipoib_mark_paths_invalid(struct ipoib_dev_priv *priv)
{
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %16D invalid\n",
			  be16_to_cpu(path->pathrec.dlid),
			  path->pathrec.dgid.raw, ":");
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}

void
ipoib_flush_paths(struct ipoib_dev_priv *priv)
{
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		wait_for_completion(&path->done);
		ipoib_path_free(priv, path);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void
path_rec_completion(int status, struct ib_sa_path_rec *pathrec, void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct ipoib_dev_priv *priv = path->priv;
	struct ifnet *dev = priv->dev;
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ifqueue mbqueue;
	struct mbuf *mb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %16D\n",
			  be16_to_cpu(pathrec->dlid), pathrec->dgid.raw, ":");
	else
		ipoib_dbg(priv, "PathRec status %d for GID %16D\n",
			  status, path->pathrec.dgid.raw, ":");

	bzero(&mbqueue, sizeof(mbqueue));

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(priv, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (ah) {
		path->pathrec = *pathrec;

		old_ah = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		for (;;) {
			_IF_DEQUEUE(&path->queue, mb);
			if (mb == NULL)
				break;
			_IF_ENQUEUE(&mbqueue, mb);
		}

#ifdef CONFIG_INFINIBAND_IPOIB_CM
		if (ipoib_cm_enabled(priv, path->hwaddr) && !ipoib_cm_get(path))
			ipoib_cm_set(path, ipoib_cm_create_tx(priv, path));
#endif

		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (old_ah)
		ipoib_put_ah(old_ah);
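
	/*
	 * Retransmit packets that were queued while the path record
	 * lookup was in flight; this runs unlocked since if_transmit
	 * may take the lock again.
	 */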
	for (;;) {
		_IF_DEQUEUE(&mbqueue, mb);
		if (mb == NULL)
			break;
		mb->m_pkthdr.rcvif = dev;
		if (dev->if_transmit(dev, mb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

static struct ipoib_path *
path_rec_create(struct ipoib_dev_priv *priv, uint8_t *hwaddr)
{
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->priv = priv;

	bzero(&path->queue, sizeof(path->queue));

#ifdef CONFIG_INFINIBAND_IPOIB_CM
	memcpy(&path->hwaddr, hwaddr, INFINIBAND_ALEN);
#endif
	memcpy(path->pathrec.dgid.raw, &hwaddr[4], sizeof (union ib_gid));
	path->pathrec.sgid = priv->local_gid;
	path->pathrec.pkey = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}
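
/*
 * Start an SA path record query.  The MTU selector asks for a path
 * whose IB MTU strictly exceeds the interface MTU rounded up to a
 * power of two; e.g. if_mtu + IPOIB_ENCAP_LEN <= 2048 requests
 * "MTU > IB_MTU_1024".  Sizes outside the table wildcard the MTU.
 */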

static int
path_rec_start(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{
	struct ifnet *dev = priv->dev;

	ib_sa_comp_mask comp_mask = IB_SA_PATH_REC_MTU_SELECTOR | IB_SA_PATH_REC_MTU;
	struct ib_sa_path_rec p_rec;

	p_rec = path->pathrec;
	p_rec.mtu_selector = IB_SA_GT;

	switch (roundup_pow_of_two(dev->if_mtu + IPOIB_ENCAP_LEN)) {
	case 512:
		p_rec.mtu = IB_MTU_256;
		break;
	case 1024:
		p_rec.mtu = IB_MTU_512;
		break;
	case 2048:
		p_rec.mtu = IB_MTU_1024;
		break;
	case 4096:
		p_rec.mtu = IB_MTU_2048;
		break;
	default:
		/* Wildcard everything */
		comp_mask = 0;
		p_rec.mtu = 0;
		p_rec.mtu_selector = 0;
	}

	ipoib_dbg(priv, "Start path record lookup for %16D MTU > %d\n",
		  p_rec.dgid.raw, ":",
		  comp_mask ? ib_mtu_enum_to_int(p_rec.mtu) : 0);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &p_rec, comp_mask |
				   IB_SA_PATH_REC_DGID |
				   IB_SA_PATH_REC_SGID |
				   IB_SA_PATH_REC_NUMB_PATH |
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}

static void
ipoib_unicast_send(struct mbuf *mb, struct ipoib_dev_priv *priv, struct ipoib_header *eh)
{
	struct ipoib_path *path;

	/* Note: the caller (_ipoib_start()) already holds priv->lock. */
	path = __path_find(priv, eh->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(priv, eh->hwaddr);
			new_path = 1;
		}
		if (path) {
			_IF_ENQUEUE(&path->queue, mb);
			if (!path->query && path_rec_start(priv, path)) {
				if (new_path)
					ipoib_path_free(priv, path);
				return;
			} else
				__path_add(priv, path);
		} else {
			if_inc_counter(priv->dev, IFCOUNTER_OERRORS, 1);
			m_freem(mb);
		}

		return;
	}

	if (ipoib_cm_get(path) && ipoib_cm_up(path)) {
		ipoib_cm_send(priv, mb, ipoib_cm_get(path));
	} else if (path->ah) {
		ipoib_send(priv, mb, path->ah, IPOIB_QPN(eh->hwaddr));
	} else if ((path->query || !path_rec_start(priv, path)) &&
		    path->queue.ifq_len < IPOIB_MAX_PATH_REC_QUEUE) {
		_IF_ENQUEUE(&path->queue, mb);
	} else {
		if_inc_counter(priv->dev, IFCOUNTER_OERRORS, 1);
		m_freem(mb);
	}
}

static int
ipoib_send_one(struct ipoib_dev_priv *priv, struct mbuf *mb)
{
	struct ipoib_header *eh;

	eh = mtod(mb, struct ipoib_header *);
	if (IPOIB_IS_MULTICAST(eh->hwaddr)) {
		/* Add in the P_Key for multicast */
		eh->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		eh->hwaddr[9] = priv->pkey & 0xff;

		ipoib_mcast_send(priv, eh->hwaddr + 4, mb);
	} else
		ipoib_unicast_send(mb, priv, eh);

	return 0;
}

static void
_ipoib_start(struct ifnet *dev, struct ipoib_dev_priv *priv)
{
	struct mbuf *mb;

	if ((dev->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	spin_lock(&priv->lock);
	while (!IFQ_DRV_IS_EMPTY(&dev->if_snd) &&
	    (dev->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
		IFQ_DRV_DEQUEUE(&dev->if_snd, mb);
		if (mb == NULL)
			break;
		IPOIB_MTAP(dev, mb);
		ipoib_send_one(priv, mb);
	}
	spin_unlock(&priv->lock);
}

static void
ipoib_start(struct ifnet *dev)
{
	_ipoib_start(dev, dev->if_softc);
}

static void
ipoib_vlan_start(struct ifnet *dev)
{
	struct ipoib_dev_priv *priv;
	struct mbuf *mb;

	priv = VLAN_COOKIE(dev);
	if (priv != NULL)
		return _ipoib_start(dev, priv);
	while (!IFQ_DRV_IS_EMPTY(&dev->if_snd)) {
		IFQ_DRV_DEQUEUE(&dev->if_snd, mb);
		if (mb == NULL)
			break;
		m_freem(mb);
		if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
	}
}
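
/*
 * Per-port device init.  The RX/TX "rings" are plain arrays sized by
 * the module parameters (normalized to powers of two in
 * ipoib_init_module()); the IB resources themselves are set up by
 * ipoib_ib_dev_init().
 */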

int
ipoib_dev_init(struct ipoib_dev_priv *priv, struct ib_device *ca, int port)
{

	/* Allocate RX/TX "rings" to hold queued mbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring, GFP_KERNEL);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}
	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(priv, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	kfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

static void
ipoib_detach(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev;

	dev = priv->dev;
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		bpfdetach(dev);
		if_detach(dev);
		if_free(dev);
	} else
		VLAN_SETCOOKIE(priv->dev, NULL);

	free(priv, M_TEMP);
}

void
ipoib_dev_cleanup(struct ipoib_dev_priv *priv)
{
	struct ipoib_dev_priv *cpriv, *tcpriv;

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		ipoib_dev_cleanup(cpriv);
		ipoib_detach(cpriv);
	}

	ipoib_ib_dev_cleanup(priv);

	kfree(priv->rx_ring);
	kfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static volatile int ipoib_unit;

static struct ipoib_dev_priv *
ipoib_priv_alloc(void)
{
	struct ipoib_dev_priv *priv;

	priv = malloc(sizeof(struct ipoib_dev_priv), M_TEMP, M_ZERO|M_WAITOK);
	spin_lock_init(&priv->lock);
	mutex_init(&priv->vlan_mutex);
	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);
	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	memcpy(priv->broadcastaddr, ipv4_bcast_addr, INFINIBAND_ALEN);

	return (priv);
}
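
/*
 * Allocate and attach the ifnet.  The INFINIBAND_ALEN (20-byte)
 * link-level address is a 4-byte QPN field followed by the 16-byte
 * port GID; the GID half is filled in from ib_query_gid() in
 * ipoib_add_port().
 */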

struct ipoib_dev_priv *
ipoib_intf_alloc(const char *name)
{
	struct ipoib_dev_priv *priv;
	struct sockaddr_dl *sdl;
	struct ifnet *dev;

	priv = ipoib_priv_alloc();
	dev = priv->dev = if_alloc(IFT_INFINIBAND);
	if (!dev) {
		free(priv, M_TEMP);
		return NULL;
	}
	dev->if_softc = priv;
	if_initname(dev, name, atomic_fetchadd_int(&ipoib_unit, 1));
	dev->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	dev->if_addrlen = INFINIBAND_ALEN;
	dev->if_hdrlen = IPOIB_HEADER_LEN;
	if_attach(dev);
	dev->if_init = ipoib_init;
	dev->if_ioctl = ipoib_ioctl;
	dev->if_start = ipoib_start;
	dev->if_output = ipoib_output;
	dev->if_input = ipoib_input;
	dev->if_resolvemulti = ipoib_resolvemulti;
	dev->if_baudrate = IF_Gbps(10);
	dev->if_broadcastaddr = priv->broadcastaddr;
	dev->if_snd.ifq_maxlen = ipoib_sendq_size * 2;
	sdl = (struct sockaddr_dl *)dev->if_addr->ifa_addr;
	sdl->sdl_type = IFT_INFINIBAND;
	sdl->sdl_alen = dev->if_addrlen;
	priv->dev = dev;
	if_link_state_change(dev, LINK_STATE_DOWN);
	bpfattach(dev, DLT_EN10MB, ETHER_HDR_LEN);

	return dev->if_softc;
}

int
ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	struct ib_device_attr *device_attr;
	int result = -ENOMEM;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr) {
		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
		       hca->name, sizeof *device_attr);
		return result;
	}

	result = ib_query_device(hca, device_attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
		       hca->name, result);
		kfree(device_attr);
		return result;
	}
	priv->hca_caps = device_attr->device_cap_flags;

	kfree(device_attr);

	priv->dev->if_hwassist = 0;
	priv->dev->if_capabilities = 0;

#ifndef CONFIG_INFINIBAND_IPOIB_CM
	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
		priv->dev->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
		priv->dev->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
	}

#if 0
	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO) {
		priv->dev->if_capabilities |= IFCAP_TSO4;
		priv->dev->if_hwassist |= CSUM_TSO;
	}
#endif
#endif
	priv->dev->if_capabilities |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_LINKSTATE;
	priv->dev->if_capenable = priv->dev->if_capabilities;

	return 0;
}
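
/*
 * Bring up a single IB port as a network interface: query its MTU,
 * P_Key and GID, initialize the data path and register for
 * asynchronous IB events (SM changes, link state and the like).
 */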

static struct ifnet *
ipoib_add_port(const char *format, struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	if (!ib_query_port(hca, port, &attr))
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		       hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->if_mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->if_mtu;

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	if (ipoib_set_dev_features(priv, hca))
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->broadcastaddr[8] = priv->pkey >> 8;
	priv->broadcastaddr[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}
	memcpy(IF_LLADDR(priv->dev) + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}
	if (ipoib_cm_admin_enabled(priv))
		priv->dev->if_mtu = IPOIB_CM_MTU(ipoib_cm_max_mtu(priv));

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}
	if_printf(priv->dev, "Attached to %s port %d\n", hca->name, port);

	return priv->dev;

event_failed:
	ipoib_dev_cleanup(priv);

device_init_failed:
	ipoib_detach(priv);

alloc_mem_failed:
	return ERR_PTR(result);
}

static void
ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct ifnet *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
			continue;
		dev = ipoib_add_port("ib", device, p);
		if (!IS_ERR(dev)) {
			priv = dev->if_softc;
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

static void
ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		if (rdma_port_get_link_layer(device, priv->port) != IB_LINK_LAYER_INFINIBAND)
			continue;

		ipoib_stop(priv);

		ib_unregister_event_handler(&priv->event_handler);

		/* dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); */

		flush_workqueue(ipoib_workqueue);

		ipoib_dev_cleanup(priv);
		ipoib_detach(priv);
	}

	kfree(dev_list);
}
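
/*
 * P_Key child interfaces piggyback on the vlan(4) config events: the
 * vlan tag is reused as the (15-bit) P_Key, with the high bit reserved
 * for full membership, so attaching a vlan to an IPoIB parent creates
 * a child interface on that P_Key.
 */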

static void
ipoib_config_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct ipoib_dev_priv *parent;
	struct ipoib_dev_priv *priv;
	struct ifnet *dev;
	uint16_t pkey;
	int error;

	if (ifp->if_type != IFT_INFINIBAND)
		return;
	dev = VLAN_DEVAT(ifp, vtag);
	if (dev == NULL)
		return;
	priv = NULL;
	error = 0;
	parent = ifp->if_softc;
	/* We only support 15 bits of pkey. */
	if (vtag & 0x8000)
		return;
	pkey = vtag | 0x8000;	/* Set full membership bit. */
	if (pkey == parent->pkey)
		return;
	/* Check for dups */
	mutex_lock(&parent->vlan_mutex);
	list_for_each_entry(priv, &parent->child_intfs, list) {
		if (priv->pkey == pkey) {
			priv = NULL;
			error = EBUSY;
			goto out;
		}
	}
	priv = ipoib_priv_alloc();
	priv->dev = dev;
	priv->max_ib_mtu = parent->max_ib_mtu;
	priv->mcast_mtu = priv->admin_mtu = parent->dev->if_mtu;
	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
	error = ipoib_set_dev_features(priv, parent->ca);
	if (error)
		goto out;
	priv->pkey = pkey;
	priv->broadcastaddr[8] = pkey >> 8;
	priv->broadcastaddr[9] = pkey & 0xff;
	dev->if_broadcastaddr = priv->broadcastaddr;
	error = ipoib_dev_init(priv, parent->ca, parent->port);
	if (error)
		goto out;
	priv->parent = parent->dev;
	list_add_tail(&priv->list, &parent->child_intfs);
	VLAN_SETCOOKIE(dev, priv);
	dev->if_start = ipoib_vlan_start;
	dev->if_drv_flags &= ~IFF_DRV_RUNNING;
	dev->if_hdrlen = IPOIB_HEADER_LEN;
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ipoib_open(priv);
	mutex_unlock(&parent->vlan_mutex);
	return;
out:
	mutex_unlock(&parent->vlan_mutex);
	if (priv)
		free(priv, M_TEMP);
	if (error)
		ipoib_warn(parent,
		    "failed to initialize subinterface: device %s, port %d vtag 0x%X",
		    parent->ca->name, parent->port, vtag);
	return;
}

static void
ipoib_unconfig_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct ipoib_dev_priv *parent;
	struct ipoib_dev_priv *priv;
	struct ifnet *dev;
	uint16_t pkey;

	if (ifp->if_type != IFT_INFINIBAND)
		return;

	dev = VLAN_DEVAT(ifp, vtag);
	if (dev)
		VLAN_SETCOOKIE(dev, NULL);
	pkey = vtag | 0x8000;
	parent = ifp->if_softc;
	mutex_lock(&parent->vlan_mutex);
	list_for_each_entry(priv, &parent->child_intfs, list) {
		if (priv->pkey == pkey) {
			ipoib_dev_cleanup(priv);
			list_del(&priv->list);
			break;
		}
	}
	mutex_unlock(&parent->vlan_mutex);
}

eventhandler_tag ipoib_vlan_attach;
eventhandler_tag ipoib_vlan_detach;
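
/*
 * Module load: normalize the queue sizes before any port is attached.
 * roundup_pow_of_two() means e.g. a requested recv_queue_size of 200
 * becomes 256, which is then clamped to
 * [IPOIB_MIN_QUEUE_SIZE, IPOIB_MAX_QUEUE_SIZE].
 */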

static int __init
ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
						     IPOIB_MIN_QUEUE_SIZE));
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	ipoib_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ipoib_config_vlan, NULL, EVENTHANDLER_PRI_FIRST);
	ipoib_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ipoib_unconfig_vlan, NULL, EVENTHANDLER_PRI_FIRST);

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	return 0;

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	return ret;
}

static void __exit
ipoib_cleanup_module(void)
{

	EVENTHANDLER_DEREGISTER(vlan_config, ipoib_vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, ipoib_vlan_detach);
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);
}

/*
 * InfiniBand output routine: resolve the destination into a 20-byte
 * IPoIB hardware address, prepend the link header and hand the packet
 * to if_transmit.
 */
static int
ipoib_output(struct ifnet *ifp, struct mbuf *m,
	const struct sockaddr *dst, struct route *ro)
{
	u_char edst[INFINIBAND_ALEN];
	struct llentry *lle = NULL;
	struct rtentry *rt0 = NULL;
	struct ipoib_header *eh;
	int error = 0, is_gw = 0;
	short type;

	if (ro != NULL) {
		if (!(m->m_flags & (M_BCAST | M_MCAST)))
			lle = ro->ro_lle;
		rt0 = ro->ro_rt;
		if (rt0 != NULL && (rt0->rt_flags & RTF_GATEWAY) != 0)
			is_gw = 1;
	}
#ifdef MAC
	error = mac_ifnet_check_transmit(ifp, m);
	if (error)
		goto bad;
#endif

	M_PROFILE(m);
	if (ifp->if_flags & IFF_MONITOR) {
		error = ENETDOWN;
		goto bad;
	}
	if (!((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		error = ENETDOWN;
		goto bad;
	}

	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		if (lle != NULL && (lle->la_flags & LLE_VALID))
			memcpy(edst, &lle->ll_addr.mac8, sizeof(edst));
		else if (m->m_flags & M_MCAST)
			ip_ib_mc_map(((struct sockaddr_in *)dst)->sin_addr.s_addr, ifp->if_broadcastaddr, edst);
		else
			error = arpresolve(ifp, is_gw, m, dst, edst, NULL);
		if (error)
			return (error == EWOULDBLOCK ? 0 : error);
		type = htons(ETHERTYPE_IP);
		break;
	case AF_ARP:
	{
		struct arphdr *ah;
		ah = mtod(m, struct arphdr *);
		ah->ar_hrd = htons(ARPHRD_INFINIBAND);

		switch (ntohs(ah->ar_op)) {
		case ARPOP_REVREQUEST:
		case ARPOP_REVREPLY:
			type = htons(ETHERTYPE_REVARP);
			break;
		case ARPOP_REQUEST:
		case ARPOP_REPLY:
		default:
			type = htons(ETHERTYPE_ARP);
			break;
		}

		if (m->m_flags & M_BCAST)
			bcopy(ifp->if_broadcastaddr, edst, INFINIBAND_ALEN);
		else
			bcopy(ar_tha(ah), edst, INFINIBAND_ALEN);

	}
	break;
#endif
#ifdef INET6
	case AF_INET6:
		if (lle != NULL && (lle->la_flags & LLE_VALID))
			memcpy(edst, &lle->ll_addr.mac8, sizeof(edst));
		else if (m->m_flags & M_MCAST)
			ipv6_ib_mc_map(&((struct sockaddr_in6 *)dst)->sin6_addr, ifp->if_broadcastaddr, edst);
		else
			error = nd6_storelladdr(ifp, m, dst, (u_char *)edst, NULL);
		if (error)
			return error;
		type = htons(ETHERTYPE_IPV6);
		break;
#endif

	default:
		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/*
	 * Add local net header.  If no space in first mbuf,
	 * allocate another.
	 */
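	/* On failure, M_PREPEND frees the chain and leaves m NULL. */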
	M_PREPEND(m, IPOIB_HEADER_LEN, M_NOWAIT);
	if (m == NULL) {
		error = ENOBUFS;
		goto bad;
	}
	eh = mtod(m, struct ipoib_header *);
	(void)memcpy(&eh->proto, &type, sizeof(eh->proto));
	(void)memcpy(&eh->hwaddr, edst, sizeof (edst));

	/*
	 * Queue message on interface, update output statistics if
	 * successful, and start output if interface not yet active.
	 */
	return ((ifp->if_transmit)(ifp, m));
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}

/*
 * Upper layer processing for a received InfiniBand packet.
 */
void
ipoib_demux(struct ifnet *ifp, struct mbuf *m, u_short proto)
{
	int isr;

#ifdef MAC
	/*
	 * Tag the mbuf with an appropriate MAC label before any other
	 * consumers can get to it.
	 */
	mac_ifnet_create_mbuf(ifp, m);
#endif
	/* Allow monitor mode to claim this frame, after stats are updated. */
	if (ifp->if_flags & IFF_MONITOR) {
		if_printf(ifp, "discard frame at IFF_MONITOR\n");
		m_freem(m);
		return;
	}
	/*
	 * Dispatch frame to upper layer.
	 */
	switch (proto) {
#ifdef INET
	case ETHERTYPE_IP:
		isr = NETISR_IP;
		break;

	case ETHERTYPE_ARP:
		if (ifp->if_flags & IFF_NOARP) {
			/* Discard packet if ARP is disabled on interface */
			m_freem(m);
			return;
		}
		isr = NETISR_ARP;
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		isr = NETISR_IPV6;
		break;
#endif
	default:
		goto discard;
	}
	netisr_dispatch(isr, m);
	return;

discard:
	m_freem(m);
}

/*
 * Process a received InfiniBand packet.
 */
static void
ipoib_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ipoib_header *eh;

	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		return;
	}
	CURVNET_SET_QUIET(ifp->if_vnet);

	/* Let BPF have it before we strip the header. */
	IPOIB_MTAP(ifp, m);
	eh = mtod(m, struct ipoib_header *);
	/*
	 * Reset layer specific mbuf flags to avoid confusing upper layers.
	 * Strip off InfiniBand header.
	 */
	m->m_flags &= ~M_VLANTAG;
	m_clrprotoflags(m);
	m_adj(m, IPOIB_HEADER_LEN);

	if (IPOIB_IS_MULTICAST(eh->hwaddr)) {
		if (memcmp(eh->hwaddr, ifp->if_broadcastaddr,
		    ifp->if_addrlen) == 0)
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;
		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
	}

	ipoib_demux(ifp, m, ntohs(eh->proto));
	CURVNET_RESTORE();
}
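
/*
 * Map a network-layer multicast address to a 20-byte link address.
 * ip_ib_mc_map()/ipv6_ib_mc_map() fold the group into a multicast
 * GID, taking the P_Key and scope from the broadcast address.
 */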

static int
ipoib_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
	struct sockaddr *sa)
{
	struct sockaddr_dl *sdl;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif
	u_char *e_addr;

	switch (sa->sa_family) {
	case AF_LINK:
		/*
		 * No mapping needed.  Just check that it's a valid MC address.
		 */
		sdl = (struct sockaddr_dl *)sa;
		e_addr = LLADDR(sdl);
		if (!IPOIB_IS_MULTICAST(e_addr))
			return EADDRNOTAVAIL;
		*llsa = 0;
		return 0;

#ifdef INET
	case AF_INET:
		sin = (struct sockaddr_in *)sa;
		if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
			return EADDRNOTAVAIL;
		sdl = link_init_sdl(ifp, *llsa, IFT_INFINIBAND);
		sdl->sdl_alen = INFINIBAND_ALEN;
		e_addr = LLADDR(sdl);
		ip_ib_mc_map(sin->sin_addr.s_addr, ifp->if_broadcastaddr,
		    e_addr);
		*llsa = (struct sockaddr *)sdl;
		return 0;
#endif
#ifdef INET6
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *)sa;
		/*
		 * An IP6 address of 0 means listen to all
		 * of the multicast address used for IP6.
		 * This has no meaning in ipoib.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
			return EADDRNOTAVAIL;
		if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
			return EADDRNOTAVAIL;
		sdl = link_init_sdl(ifp, *llsa, IFT_INFINIBAND);
		sdl->sdl_alen = INFINIBAND_ALEN;
		e_addr = LLADDR(sdl);
		ipv6_ib_mc_map(&sin6->sin6_addr, ifp->if_broadcastaddr, e_addr);
		*llsa = (struct sockaddr *)sdl;
		return 0;
#endif

	default:
		return EAFNOSUPPORT;
	}
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);

#undef MODULE_VERSION
#include <sys/module.h>
static int
ipoib_evhand(module_t mod, int event, void *arg)
{
	return (0);
}

static moduledata_t ipoib_mod = {
	.name = "ipoib",
	.evhand = ipoib_evhand,
};

DECLARE_MODULE(ipoib, ipoib_mod, SI_SUB_SMP, SI_ORDER_ANY);
MODULE_DEPEND(ipoib, ibcore, 1, 1, 1);