/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

static int ipoib_resolvemulti(struct ifnet *, struct sockaddr **,
	struct sockaddr *);

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level = 1;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct ipoib_dev_priv *priv;
	struct ipoib_path path;
};

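/*
 * IPoIB link-layer addresses are INFINIBAND_ALEN (20) bytes: a 4-byte
 * queue pair number followed by the 16-byte port GID.  The table below
 * is the well-known IPv4 broadcast address: the multicast QPN
 * 0x00ffffff followed by the broadcast MGID ff12:401b::ffff:ffff.
 * Bytes 8 and 9 (the P_Key field of the MGID) are left zero here and
 * are patched with the interface P_Key at attach time.
 */
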
static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);
static void ipoib_start(struct ifnet *dev);
static int ipoib_output(struct ifnet *ifp, struct mbuf *m,
	struct sockaddr *dst, struct route *ro);
static int ipoib_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void ipoib_input(struct ifnet *ifp, struct mbuf *m);

#define	IPOIB_MTAP(_ifp, _m)					\
do {								\
	if (bpf_peers_present((_ifp)->if_bpf)) {		\
		M_ASSERTVALID(_m);				\
		ipoib_mtap_mb((_ifp), (_m));			\
	}							\
} while (0)

/*
 * This is for clients that have an ipoib_header in the mbuf.
 */
static void
ipoib_mtap_mb(struct ifnet *ifp, struct mbuf *mb)
{
	struct ipoib_header *ih;
	struct ether_header eh;

	ih = mtod(mb, struct ipoib_header *);
	eh.ether_type = ih->proto;
	bcopy(ih->hwaddr, &eh.ether_dhost, ETHER_ADDR_LEN);
	bzero(&eh.ether_shost, ETHER_ADDR_LEN);
	mb->m_data += sizeof(struct ipoib_header);
	mb->m_len -= sizeof(struct ipoib_header);
	bpf_mtap2(ifp->if_bpf, &eh, sizeof(eh), mb);
	mb->m_data -= sizeof(struct ipoib_header);
	mb->m_len += sizeof(struct ipoib_header);
}

void
ipoib_mtap_proto(struct ifnet *ifp, struct mbuf *mb, uint16_t proto)
{
	struct ether_header eh;

	eh.ether_type = proto;
	bzero(&eh.ether_shost, ETHER_ADDR_LEN);
	bzero(&eh.ether_dhost, ETHER_ADDR_LEN);
	bpf_mtap2(ifp->if_bpf, &eh, sizeof(eh), mb);
}

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};

int
ipoib_open(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;

	ipoib_dbg(priv, "bringing up interface\n");

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(priv))
		return 0;

	if (ipoib_ib_dev_open(priv))
		goto err_disable;

	if (ipoib_ib_dev_up(priv))
		goto err_stop;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list)
			if ((cpriv->dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ipoib_open(cpriv);
		mutex_unlock(&priv->vlan_mutex);
	}
	dev->if_drv_flags |= IFF_DRV_RUNNING;
	dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return 0;

err_stop:
	ipoib_ib_dev_stop(priv, 1);

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}

static void
ipoib_init(void *arg)
{
	struct ifnet *dev;
	struct ipoib_dev_priv *priv;

	priv = arg;
	dev = priv->dev;
	if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
		ipoib_open(priv);
	queue_work(ipoib_workqueue, &priv->flush_light);
}

static int
ipoib_stop(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ipoib_ib_dev_down(priv, 0);
	ipoib_ib_dev_stop(priv, 0);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list)
			if ((cpriv->dev->if_drv_flags & IFF_DRV_RUNNING) != 0)
				ipoib_stop(cpriv);
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}

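/*
 * MTU policy note (illustrative): in datagram (UD) mode the effective
 * MTU is the smaller of the administratively requested MTU and the
 * multicast group MTU, and may not exceed IPOIB_UD_MTU() of the
 * port's IB MTU.  Assuming the usual 4-byte IPoIB encapsulation
 * header, a 2048-byte IB MTU yields a 2044-byte UD payload limit.
 * Connected mode bypasses this limit, up to IPOIB_CM_MTU().
 */
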
int
ipoib_change_mtu(struct ipoib_dev_priv *priv, int new_mtu)
{
	struct ifnet *dev = priv->dev;

	/* dev->if_mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(priv)) {
		if (new_mtu > IPOIB_CM_MTU(ipoib_cm_max_mtu(priv)))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
			    priv->mcast_mtu);

		dev->if_mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->if_mtu = min(priv->mcast_mtu, priv->admin_mtu);

	queue_work(ipoib_workqueue, &priv->flush_light);

	return 0;
}

static int
ipoib_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ipoib_dev_priv *priv = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				error = -ipoib_open(priv);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ipoib_stop(priv);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			queue_work(ipoib_workqueue, &priv->restart_task);
		break;
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ifp->if_init(ifp->if_softc);	/* before arpwhohas */
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			ifp->if_init(ifp->if_softc);
			break;
		}
		break;

	case SIOCGIFADDR:
		{
			struct sockaddr *sa;

			sa = (struct sockaddr *) &ifr->ifr_data;
			bcopy(IF_LLADDR(ifp),
			    (caddr_t) sa->sa_data, INFINIBAND_ALEN);
		}
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		error = -ipoib_change_mtu(priv, ifr->ifr_mtu);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

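/*
 * Path cache: unicast destinations are cached in a red-black tree
 * keyed by the full 16-byte destination GID (memcmp order), with a
 * parallel list used for bulk iteration and teardown.  Callers of
 * __path_find() and __path_add() are expected to hold priv->lock.
 */
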
static struct ipoib_path *
__path_find(struct ipoib_dev_priv *priv, void *gid)
{
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
		    sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int
__path_add(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
		    sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

void
ipoib_path_free(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{

	_IF_DRAIN(&path->queue);

	if (path->ah)
		ipoib_put_ah(path->ah);
	if (ipoib_cm_get(path))
		ipoib_cm_destroy_tx(ipoib_cm_get(path));

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *
ipoib_path_iter_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->priv = priv;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int
ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = iter->priv;
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
		    sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void
ipoib_path_iter_read(struct ipoib_path_iter *iter, struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void
ipoib_mark_paths_invalid(struct ipoib_dev_priv *priv)
{
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %16D invalid\n",
		    be16_to_cpu(path->pathrec.dlid),
		    path->pathrec.dgid.raw, ":");
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}

void
ipoib_flush_paths(struct ipoib_dev_priv *priv)
{
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		wait_for_completion(&path->done);
		ipoib_path_free(priv, path);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

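/*
 * SA path record query callback.  On success an address handle is
 * created from the returned path record and swapped in under
 * priv->lock; any mbufs that were queued while the path was
 * unresolved are moved to a local queue and retransmitted after the
 * lock is dropped.
 */
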
static void
path_rec_completion(int status, struct ib_sa_path_rec *pathrec, void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct ipoib_dev_priv *priv = path->priv;
	struct ifnet *dev = priv->dev;
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ifqueue mbqueue;
	struct mbuf *mb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %16D\n",
		    be16_to_cpu(pathrec->dlid), pathrec->dgid.raw, ":");
	else
		ipoib_dbg(priv, "PathRec status %d for GID %16D\n",
		    status, path->pathrec.dgid.raw, ":");

	bzero(&mbqueue, sizeof(mbqueue));

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(priv, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (ah) {
		path->pathrec = *pathrec;

		old_ah = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
		    ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		for (;;) {
			_IF_DEQUEUE(&path->queue, mb);
			if (mb == NULL)
				break;
			_IF_ENQUEUE(&mbqueue, mb);
		}

#ifdef CONFIG_INFINIBAND_IPOIB_CM
		if (ipoib_cm_enabled(priv, path->hwaddr) && !ipoib_cm_get(path))
			ipoib_cm_set(path, ipoib_cm_create_tx(priv, path));
#endif

		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (old_ah)
		ipoib_put_ah(old_ah);

	for (;;) {
		_IF_DEQUEUE(&mbqueue, mb);
		if (mb == NULL)
			break;
		mb->m_pkthdr.rcvif = dev;
		if (dev->if_transmit(dev, mb))
			ipoib_warn(priv, "if_transmit failed "
			    "to requeue packet\n");
	}
}

static struct ipoib_path *
path_rec_create(struct ipoib_dev_priv *priv, uint8_t *hwaddr)
{
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->priv = priv;

	bzero(&path->queue, sizeof(path->queue));

#ifdef CONFIG_INFINIBAND_IPOIB_CM
	memcpy(&path->hwaddr, hwaddr, INFINIBAND_ALEN);
#endif
	memcpy(path->pathrec.dgid.raw, &hwaddr[4], sizeof (union ib_gid));
	path->pathrec.sgid = priv->local_gid;
	path->pathrec.pkey = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}

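/*
 * Worked example of the MTU selector below (illustrative): with
 * if_mtu 1500 and a 4-byte encapsulation header, roundup_pow_of_two()
 * yields 2048, so the query asks the SA for paths whose MTU is
 * strictly greater than IB_MTU_1024, i.e. at least 2048 bytes.  Any
 * other rounded size falls through to the default case and wildcards
 * the MTU entirely.
 */
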
static int
path_rec_start(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{
	struct ifnet *dev = priv->dev;

	ib_sa_comp_mask comp_mask = IB_SA_PATH_REC_MTU_SELECTOR | IB_SA_PATH_REC_MTU;
	struct ib_sa_path_rec p_rec;

	p_rec = path->pathrec;
	p_rec.mtu_selector = IB_SA_GT;

	switch (roundup_pow_of_two(dev->if_mtu + IPOIB_ENCAP_LEN)) {
	case 512:
		p_rec.mtu = IB_MTU_256;
		break;
	case 1024:
		p_rec.mtu = IB_MTU_512;
		break;
	case 2048:
		p_rec.mtu = IB_MTU_1024;
		break;
	case 4096:
		p_rec.mtu = IB_MTU_2048;
		break;
	default:
		/* Wildcard everything */
		comp_mask = 0;
		p_rec.mtu = 0;
		p_rec.mtu_selector = 0;
	}

	ipoib_dbg(priv, "Start path record lookup for %16D MTU > %d\n",
	    p_rec.dgid.raw, ":",
	    comp_mask ? ib_mtu_enum_to_int(p_rec.mtu) : 0);

	init_completion(&path->done);

	path->query_id =
	    ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
		&p_rec, comp_mask		|
		IB_SA_PATH_REC_DGID		|
		IB_SA_PATH_REC_SGID		|
		IB_SA_PATH_REC_NUMB_PATH	|
		IB_SA_PATH_REC_TRAFFIC_CLASS	|
		IB_SA_PATH_REC_PKEY,
		1000, GFP_ATOMIC,
		path_rec_completion,
		path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}

static void
ipoib_unicast_send(struct mbuf *mb, struct ipoib_dev_priv *priv, struct ipoib_header *eh)
{
	struct ipoib_path *path;

	path = __path_find(priv, eh->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(priv, eh->hwaddr);
			new_path = 1;
		}
		if (path) {
			_IF_ENQUEUE(&path->queue, mb);
			if (!path->query && path_rec_start(priv, path)) {
				/* priv->lock is held by our caller. */
				if (new_path)
					ipoib_path_free(priv, path);
				return;
			} else
				__path_add(priv, path);
		} else {
			++priv->dev->if_oerrors;
			m_freem(mb);
		}

		return;
	}

	if (ipoib_cm_get(path) && ipoib_cm_up(path)) {
		ipoib_cm_send(priv, mb, ipoib_cm_get(path));
	} else if (path->ah) {
		ipoib_send(priv, mb, path->ah, IPOIB_QPN(eh->hwaddr));
	} else if ((path->query || !path_rec_start(priv, path)) &&
	    path->queue.ifq_len < IPOIB_MAX_PATH_REC_QUEUE) {
		_IF_ENQUEUE(&path->queue, mb);
	} else {
		++priv->dev->if_oerrors;
		m_freem(mb);
	}
}

static int
ipoib_send_one(struct ipoib_dev_priv *priv, struct mbuf *mb)
{
	struct ipoib_header *eh;

	eh = mtod(mb, struct ipoib_header *);
	if (IPOIB_IS_MULTICAST(eh->hwaddr)) {
		/* Add in the P_Key for multicast */
		eh->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		eh->hwaddr[9] = priv->pkey & 0xff;

		ipoib_mcast_send(priv, eh->hwaddr + 4, mb);
	} else
		ipoib_unicast_send(mb, priv, eh);

	return 0;
}

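/*
 * Transmit start path: _ipoib_start() drains the interface send
 * queue under priv->lock, taps BPF for each packet, and hands each
 * mbuf to ipoib_send_one(), which routes it to the multicast or
 * unicast path based on the pseudo header's destination address.
 */
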
static void
_ipoib_start(struct ifnet *dev, struct ipoib_dev_priv *priv)
{
	struct mbuf *mb;

	if ((dev->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	spin_lock(&priv->lock);
	while (!IFQ_DRV_IS_EMPTY(&dev->if_snd) &&
	    (dev->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
		IFQ_DRV_DEQUEUE(&dev->if_snd, mb);
		if (mb == NULL)
			break;
		IPOIB_MTAP(dev, mb);
		ipoib_send_one(priv, mb);
	}
	spin_unlock(&priv->lock);
}

static void
ipoib_start(struct ifnet *dev)
{
	_ipoib_start(dev, dev->if_softc);
}

static void
ipoib_vlan_start(struct ifnet *dev)
{
	struct ipoib_dev_priv *priv;
	struct mbuf *mb;

	priv = VLAN_COOKIE(dev);
	if (priv != NULL)
		return _ipoib_start(dev, priv);
	while (!IFQ_DRV_IS_EMPTY(&dev->if_snd)) {
		IFQ_DRV_DEQUEUE(&dev->if_snd, mb);
		if (mb == NULL)
			break;
		m_freem(mb);
		dev->if_oerrors++;
	}
}

int
ipoib_dev_init(struct ipoib_dev_priv *priv, struct ib_device *ca, int port)
{

	/* Allocate RX/TX "rings" to hold queued mbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
	    GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		    ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring, GFP_KERNEL);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		    ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}
	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(priv, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	kfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

static void
ipoib_detach(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev;

	dev = priv->dev;
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		bpfdetach(dev);
		if_detach(dev);
		if_free(dev);
	} else
		VLAN_SETCOOKIE(priv->dev, NULL);

	free(priv, M_TEMP);
}

void
ipoib_dev_cleanup(struct ipoib_dev_priv *priv)
{
	struct ipoib_dev_priv *cpriv, *tcpriv;

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		ipoib_dev_cleanup(cpriv);
		ipoib_detach(cpriv);
	}

	ipoib_ib_dev_cleanup(priv);

	kfree(priv->rx_ring);
	kfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static volatile int ipoib_unit;

static struct ipoib_dev_priv *
ipoib_priv_alloc(void)
{
	struct ipoib_dev_priv *priv;

	priv = malloc(sizeof(struct ipoib_dev_priv), M_TEMP, M_ZERO|M_WAITOK);
	spin_lock_init(&priv->lock);
	mutex_init(&priv->vlan_mutex);
	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);
	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	memcpy(priv->broadcastaddr, ipv4_bcast_addr, INFINIBAND_ALEN);

	return (priv);
}

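/*
 * Note on BPF: the interface attaches to BPF as DLT_EN10MB, and
 * ipoib_mtap_mb()/ipoib_mtap_proto() synthesize an Ethernet header
 * for each tapped frame (IPoIB protocol type, addresses zeroed where
 * unknown), so Ethernet-aware capture tools can decode the traffic.
 */
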
struct ipoib_dev_priv *
ipoib_intf_alloc(const char *name)
{
	struct ipoib_dev_priv *priv;
	struct sockaddr_dl *sdl;
	struct ifnet *dev;

	priv = ipoib_priv_alloc();
	dev = priv->dev = if_alloc(IFT_INFINIBAND);
	if (!dev) {
		free(priv, M_TEMP);
		return NULL;
	}
	dev->if_softc = priv;
	if_initname(dev, name, atomic_fetchadd_int(&ipoib_unit, 1));
	dev->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	dev->if_addrlen = INFINIBAND_ALEN;
	dev->if_hdrlen = IPOIB_HEADER_LEN;
	if_attach(dev);
	dev->if_init = ipoib_init;
	dev->if_ioctl = ipoib_ioctl;
	dev->if_start = ipoib_start;
	dev->if_output = ipoib_output;
	dev->if_input = ipoib_input;
	dev->if_resolvemulti = ipoib_resolvemulti;
	dev->if_baudrate = IF_Gbps(10LL);
	dev->if_broadcastaddr = priv->broadcastaddr;
	dev->if_snd.ifq_maxlen = ipoib_sendq_size * 2;
	sdl = (struct sockaddr_dl *)dev->if_addr->ifa_addr;
	sdl->sdl_type = IFT_INFINIBAND;
	sdl->sdl_alen = dev->if_addrlen;
	priv->dev = dev;
	if_link_state_change(dev, LINK_STATE_DOWN);
	bpfattach(dev, DLT_EN10MB, ETHER_HDR_LEN);

	return dev->if_softc;
}

int
ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	struct ib_device_attr *device_attr;
	int result = -ENOMEM;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr) {
		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
		    hca->name, sizeof *device_attr);
		return result;
	}

	result = ib_query_device(hca, device_attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
		    hca->name, result);
		kfree(device_attr);
		return result;
	}
	priv->hca_caps = device_attr->device_cap_flags;

	kfree(device_attr);

	priv->dev->if_hwassist = 0;
	priv->dev->if_capabilities = 0;

#ifndef CONFIG_INFINIBAND_IPOIB_CM
	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
		priv->dev->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
		priv->dev->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
	}

#if 0
	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO) {
		priv->dev->if_capabilities |= IFCAP_TSO4;
		priv->dev->if_hwassist |= CSUM_TSO;
	}
#endif
#endif
	priv->dev->if_capabilities |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_LINKSTATE;
	priv->dev->if_capenable = priv->dev->if_capabilities;

	return 0;
}

static struct ifnet *
ipoib_add_port(const char *format, struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	if (!ib_query_port(hca, port, &attr))
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		    hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->if_mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->if_mtu;

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		    hca->name, port, result);
		goto device_init_failed;
	}

	if (ipoib_set_dev_features(priv, hca))
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
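	/*
	 * Illustrative example: the top bit of a P_Key is the
	 * membership bit, so OR-ing in 0x8000 turns a limited
	 * membership key into its full membership form (the limited
	 * default key 0x7fff becomes the full default key 0xffff).
	 */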
	priv->pkey |= 0x8000;

	priv->broadcastaddr[8] = priv->pkey >> 8;
	priv->broadcastaddr[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		    hca->name, port, result);
		goto device_init_failed;
	}
	memcpy(IF_LLADDR(priv->dev) + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		    hca->name, port, result);
		goto device_init_failed;
	}
	if (ipoib_cm_admin_enabled(priv))
		priv->dev->if_mtu = IPOIB_CM_MTU(ipoib_cm_max_mtu(priv));

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
	    priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		    "port %d (ret = %d)\n",
		    hca->name, port, result);
		goto event_failed;
	}
	if_printf(priv->dev, "Attached to %s port %d\n", hca->name, port);

	return priv->dev;

event_failed:
	ipoib_dev_cleanup(priv);

device_init_failed:
	ipoib_detach(priv);

alloc_mem_failed:
	return ERR_PTR(result);
}

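/*
 * Device attach: switch devices expose IPoIB only on port 0, while
 * channel adapters are scanned from port 1 through phys_port_cnt.
 * Only ports whose link layer is InfiniBand get an ipoib ifnet.
 */
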
static void
ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct ifnet *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
			continue;
		dev = ipoib_add_port("ib", device, p);
		if (!IS_ERR(dev)) {
			priv = dev->if_softc;
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

static void
ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		if (rdma_port_get_link_layer(device, priv->port) != IB_LINK_LAYER_INFINIBAND)
			continue;

		ib_unregister_event_handler(&priv->event_handler);

		/* dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); */

		flush_workqueue(ipoib_workqueue);

		ipoib_dev_cleanup(priv);
		ipoib_detach(priv);
	}

	kfree(dev_list);
}

static void
ipoib_config_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct ipoib_dev_priv *parent;
	struct ipoib_dev_priv *priv;
	struct ifnet *dev;
	uint16_t pkey;
	int error;

	if (ifp->if_type != IFT_INFINIBAND)
		return;
	dev = VLAN_DEVAT(ifp, vtag);
	if (dev == NULL)
		return;
	priv = NULL;
	error = 0;
	parent = ifp->if_softc;
	/* We only support 15 bits of pkey. */
	if (vtag & 0x8000)
		return;
	pkey = vtag | 0x8000;	/* Set full membership bit. */
	if (pkey == parent->pkey)
		return;
	/* Check for dups */
	mutex_lock(&parent->vlan_mutex);
	list_for_each_entry(priv, &parent->child_intfs, list) {
		if (priv->pkey == pkey) {
			priv = NULL;
			error = EBUSY;
			goto out;
		}
	}
	priv = ipoib_priv_alloc();
	priv->dev = dev;
	priv->max_ib_mtu = parent->max_ib_mtu;
	priv->mcast_mtu = priv->admin_mtu = parent->dev->if_mtu;
	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
	error = ipoib_set_dev_features(priv, parent->ca);
	if (error)
		goto out;
	priv->pkey = pkey;
	priv->broadcastaddr[8] = pkey >> 8;
	priv->broadcastaddr[9] = pkey & 0xff;
	dev->if_broadcastaddr = priv->broadcastaddr;
	error = ipoib_dev_init(priv, parent->ca, parent->port);
	if (error)
		goto out;
	priv->parent = parent->dev;
	list_add_tail(&priv->list, &parent->child_intfs);
	VLAN_SETCOOKIE(dev, priv);
	dev->if_start = ipoib_vlan_start;
	dev->if_drv_flags &= ~IFF_DRV_RUNNING;
	dev->if_hdrlen = IPOIB_HEADER_LEN;
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ipoib_open(priv);
	mutex_unlock(&parent->vlan_mutex);
	return;
out:
	mutex_unlock(&parent->vlan_mutex);
	if (priv)
		free(priv, M_TEMP);
	if (error)
		ipoib_warn(parent,
		    "failed to initialize subinterface: device %s, port %d vtag 0x%X",
		    parent->ca->name, parent->port, vtag);
	return;
}

static void
ipoib_unconfig_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct ipoib_dev_priv *parent;
	struct ipoib_dev_priv *priv;
	struct ifnet *dev;
	uint16_t pkey;

	if (ifp->if_type != IFT_INFINIBAND)
		return;

	dev = VLAN_DEVAT(ifp, vtag);
	if (dev)
		VLAN_SETCOOKIE(dev, NULL);
	pkey = vtag | 0x8000;
	parent = ifp->if_softc;
	mutex_lock(&parent->vlan_mutex);
	list_for_each_entry(priv, &parent->child_intfs, list) {
		if (priv->pkey == pkey) {
			ipoib_dev_cleanup(priv);
			list_del(&priv->list);
			break;
		}
	}
	mutex_unlock(&parent->vlan_mutex);
}

eventhandler_tag ipoib_vlan_attach;
eventhandler_tag ipoib_vlan_detach;

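/*
 * Module init normalizes the queue size tunables below: each is
 * rounded up to a power of two and clamped to the supported range.
 * For example (illustrative), recv_queue_size=100 becomes 128 before
 * clamping against IPOIB_MIN_QUEUE_SIZE/IPOIB_MAX_QUEUE_SIZE.
 */
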
static int __init
ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
	    IPOIB_MIN_QUEUE_SIZE));
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	ipoib_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ipoib_config_vlan, NULL, EVENTHANDLER_PRI_FIRST);
	ipoib_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ipoib_unconfig_vlan, NULL, EVENTHANDLER_PRI_FIRST);

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	return 0;

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	return ret;
}

static void __exit
ipoib_cleanup_module(void)
{

	EVENTHANDLER_DEREGISTER(vlan_config, ipoib_vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, ipoib_vlan_detach);
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);
}

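/*
 * Output/input framing: ipoib_output() resolves the 20-byte
 * link-layer destination for the address family, then prepends a
 * pseudo header (destination hwaddr plus protocol type) that
 * ipoib_send_one() consumes; ipoib_input() taps BPF and strips the
 * same header before dispatching to the network stack.
 */
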
/*
 * Infiniband output routine.
 */
static int
ipoib_output(struct ifnet *ifp, struct mbuf *m,
	struct sockaddr *dst, struct route *ro)
{
	u_char edst[INFINIBAND_ALEN];
	struct llentry *lle = NULL;
	struct rtentry *rt0 = NULL;
	struct ipoib_header *eh;
	int error = 0;
	short type;

	if (ro != NULL) {
		if (!(m->m_flags & (M_BCAST | M_MCAST)))
			lle = ro->ro_lle;
		rt0 = ro->ro_rt;
	}
#ifdef MAC
	error = mac_ifnet_check_transmit(ifp, m);
	if (error)
		goto bad;
#endif

	M_PROFILE(m);
	if (ifp->if_flags & IFF_MONITOR) {
		error = ENETDOWN;
		goto bad;
	}
	if (!((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		error = ENETDOWN;
		goto bad;
	}

	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		if (lle != NULL && (lle->la_flags & LLE_VALID))
			memcpy(edst, &lle->ll_addr.mac8, sizeof(edst));
		else if (m->m_flags & M_MCAST)
			ip_ib_mc_map(((struct sockaddr_in *)dst)->sin_addr.s_addr,
			    ifp->if_broadcastaddr, edst);
		else
			error = arpresolve(ifp, rt0, m, dst, edst, &lle);
		if (error)
			return (error == EWOULDBLOCK ? 0 : error);
		type = htons(ETHERTYPE_IP);
		break;
	case AF_ARP:
	{
		struct arphdr *ah;
		ah = mtod(m, struct arphdr *);
		ah->ar_hrd = htons(ARPHRD_INFINIBAND);

		switch (ntohs(ah->ar_op)) {
		case ARPOP_REVREQUEST:
		case ARPOP_REVREPLY:
			type = htons(ETHERTYPE_REVARP);
			break;
		case ARPOP_REQUEST:
		case ARPOP_REPLY:
		default:
			type = htons(ETHERTYPE_ARP);
			break;
		}

		if (m->m_flags & M_BCAST)
			bcopy(ifp->if_broadcastaddr, edst, INFINIBAND_ALEN);
		else
			bcopy(ar_tha(ah), edst, INFINIBAND_ALEN);

	}
	break;
#endif
#ifdef INET6
	case AF_INET6:
		if (lle != NULL && (lle->la_flags & LLE_VALID))
			memcpy(edst, &lle->ll_addr.mac8, sizeof(edst));
		else if (m->m_flags & M_MCAST)
			ipv6_ib_mc_map(&((struct sockaddr_in6 *)dst)->sin6_addr,
			    ifp->if_broadcastaddr, edst);
		else
			error = nd6_storelladdr(ifp, m, dst, (u_char *)edst, &lle);
		if (error)
			return error;
		type = htons(ETHERTYPE_IPV6);
		break;
#endif

	default:
		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/*
	 * Add local net header.  If no space in first mbuf,
	 * allocate another.
	 */
	M_PREPEND(m, IPOIB_HEADER_LEN, M_DONTWAIT);
	if (m == NULL) {
		error = ENOBUFS;
		goto bad;
	}
	eh = mtod(m, struct ipoib_header *);
	(void)memcpy(&eh->proto, &type, sizeof(eh->proto));
	(void)memcpy(&eh->hwaddr, edst, sizeof (edst));

	/*
	 * Queue message on interface, update output statistics if
	 * successful, and start output if interface not yet active.
	 */
	return ((ifp->if_transmit)(ifp, m));
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}

/*
 * Upper layer processing for a received Infiniband packet.
 */
void
ipoib_demux(struct ifnet *ifp, struct mbuf *m, u_short proto)
{
	int isr;

#ifdef MAC
	/*
	 * Tag the mbuf with an appropriate MAC label before any other
	 * consumers can get to it.
	 */
	mac_ifnet_create_mbuf(ifp, m);
#endif
	/* Allow monitor mode to claim this frame, after stats are updated. */
	if (ifp->if_flags & IFF_MONITOR) {
		if_printf(ifp, "discard frame at IFF_MONITOR\n");
		m_freem(m);
		return;
	}
	/*
	 * Dispatch frame to upper layer.
	 */
	switch (proto) {
#ifdef INET
	case ETHERTYPE_IP:
		isr = NETISR_IP;
		break;

	case ETHERTYPE_ARP:
		if (ifp->if_flags & IFF_NOARP) {
			/* Discard packet if ARP is disabled on interface */
			m_freem(m);
			return;
		}
		isr = NETISR_ARP;
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		isr = NETISR_IPV6;
		break;
#endif
	default:
		goto discard;
	}
	netisr_dispatch(isr, m);
	return;

discard:
	m_freem(m);
}

/*
 * Process a received Infiniband packet.
 */
static void
ipoib_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ipoib_header *eh;

	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		return;
	}
	CURVNET_SET_QUIET(ifp->if_vnet);

	/* Let BPF have it before we strip the header. */
	IPOIB_MTAP(ifp, m);
	eh = mtod(m, struct ipoib_header *);
	/*
	 * Reset layer specific mbuf flags to avoid confusing upper layers.
	 * Strip off Infiniband header.
	 */
	m->m_flags &= ~M_VLANTAG;
	m->m_flags &= ~(M_PROTOFLAGS);
	m_adj(m, IPOIB_HEADER_LEN);

	if (IPOIB_IS_MULTICAST(eh->hwaddr)) {
		if (memcmp(eh->hwaddr, ifp->if_broadcastaddr,
		    ifp->if_addrlen) == 0)
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;
		ifp->if_imcasts++;
	}

	ipoib_demux(ifp, m, ntohs(eh->proto));
	CURVNET_RESTORE();
}

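/*
 * Multicast resolution: IP and IPv6 group addresses are mapped onto
 * 20-byte IPoIB link-layer addresses by ip_ib_mc_map() and
 * ipv6_ib_mc_map(), using the interface broadcast address as a
 * template for the MGID, and the result is handed back to the caller
 * as a freshly allocated sockaddr_dl.
 */
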
static int
ipoib_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
	struct sockaddr *sa)
{
	struct sockaddr_dl *sdl;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif
	u_char *e_addr;

	switch (sa->sa_family) {
	case AF_LINK:
		/*
		 * No mapping needed. Just check that it's a valid MC address.
		 */
		sdl = (struct sockaddr_dl *)sa;
		e_addr = LLADDR(sdl);
		if (!IPOIB_IS_MULTICAST(e_addr))
			return EADDRNOTAVAIL;
		*llsa = NULL;
		return 0;

#ifdef INET
	case AF_INET:
		sin = (struct sockaddr_in *)sa;
		if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
			return EADDRNOTAVAIL;
		sdl = malloc(sizeof *sdl, M_IFMADDR,
		    M_NOWAIT|M_ZERO);
		if (sdl == NULL)
			return ENOMEM;
		sdl->sdl_len = sizeof *sdl;
		sdl->sdl_family = AF_LINK;
		sdl->sdl_index = ifp->if_index;
		sdl->sdl_type = IFT_INFINIBAND;
		sdl->sdl_alen = INFINIBAND_ALEN;
		e_addr = LLADDR(sdl);
		ip_ib_mc_map(sin->sin_addr.s_addr, ifp->if_broadcastaddr,
		    e_addr);
		*llsa = (struct sockaddr *)sdl;
		return 0;
#endif
#ifdef INET6
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *)sa;
		/*
		 * An IP6 address of 0 means listen to all
		 * of the multicast address used for IP6.
		 * This has no meaning in ipoib.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
			return EADDRNOTAVAIL;
		if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
			return EADDRNOTAVAIL;
		sdl = malloc(sizeof *sdl, M_IFMADDR,
		    M_NOWAIT|M_ZERO);
		if (sdl == NULL)
			return (ENOMEM);
		sdl->sdl_len = sizeof *sdl;
		sdl->sdl_family = AF_LINK;
		sdl->sdl_index = ifp->if_index;
		sdl->sdl_type = IFT_INFINIBAND;
		sdl->sdl_alen = INFINIBAND_ALEN;
		e_addr = LLADDR(sdl);
		ipv6_ib_mc_map(&sin6->sin6_addr, ifp->if_broadcastaddr, e_addr);
		*llsa = (struct sockaddr *)sdl;
		return 0;
#endif

	default:
		return EAFNOSUPPORT;
	}
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);