/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

static int ipoib_resolvemulti(struct ifnet *, struct sockaddr **,
		struct sockaddr *);

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level = 1;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct ipoib_dev_priv *priv;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);
static void ipoib_start(struct ifnet *dev);
static int ipoib_output(struct ifnet *ifp, struct mbuf *m,
	    const struct sockaddr *dst, struct route *ro);
static int ipoib_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void ipoib_input(struct ifnet *ifp, struct mbuf *m);

#define	IPOIB_MTAP(_ifp, _m)					\
do {								\
	if (bpf_peers_present((_ifp)->if_bpf)) {		\
		M_ASSERTVALID(_m);				\
		ipoib_mtap_mb((_ifp), (_m));			\
	}							\
} while (0)

/*
 * This is for clients that have an ipoib_header in the mbuf.
 */
static void
ipoib_mtap_mb(struct ifnet *ifp, struct mbuf *mb)
{
	struct ipoib_header *ih;
	struct ether_header eh;

	ih = mtod(mb, struct ipoib_header *);
	eh.ether_type = ih->proto;
	bcopy(ih->hwaddr, &eh.ether_dhost, ETHER_ADDR_LEN);
	bzero(&eh.ether_shost, ETHER_ADDR_LEN);
	mb->m_data += sizeof(struct ipoib_header);
	mb->m_len -= sizeof(struct ipoib_header);
	bpf_mtap2(ifp->if_bpf, &eh, sizeof(eh), mb);
	mb->m_data -= sizeof(struct ipoib_header);
	mb->m_len += sizeof(struct ipoib_header);
}
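/*
 * There is no native IPoIB BPF link type in use here; the interface is
 * attached as DLT_EN10MB (see ipoib_intf_alloc() below), so both tap
 * paths (ipoib_mtap_mb() above and ipoib_mtap_proto() below) synthesize
 * a fake Ethernet header with the IPoIB ethertype in ether_type;
 * ipoib_mtap_mb() additionally truncates the 20-byte destination
 * address into the 6-byte ether_dhost, which is enough for capture
 * tools to classify the payload.
 */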
void
ipoib_mtap_proto(struct ifnet *ifp, struct mbuf *mb, uint16_t proto)
{
	struct ether_header eh;

	eh.ether_type = proto;
	bzero(&eh.ether_shost, ETHER_ADDR_LEN);
	bzero(&eh.ether_dhost, ETHER_ADDR_LEN);
	bpf_mtap2(ifp->if_bpf, &eh, sizeof(eh), mb);
}

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};

int
ipoib_open(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;

	ipoib_dbg(priv, "bringing up interface\n");

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(priv))
		return 0;

	if (ipoib_ib_dev_open(priv))
		goto err_disable;

	if (ipoib_ib_dev_up(priv))
		goto err_stop;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list)
			if ((cpriv->dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ipoib_open(cpriv);
		mutex_unlock(&priv->vlan_mutex);
	}
	dev->if_drv_flags |= IFF_DRV_RUNNING;
	dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return 0;

err_stop:
	ipoib_ib_dev_stop(priv, 1);

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}

static void
ipoib_init(void *arg)
{
	struct ifnet *dev;
	struct ipoib_dev_priv *priv;

	priv = arg;
	dev = priv->dev;
	if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
		ipoib_open(priv);
	queue_work(ipoib_workqueue, &priv->flush_light);
}

static int
ipoib_stop(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ipoib_ib_dev_down(priv, 0);
	ipoib_ib_dev_stop(priv, 0);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list)
			if ((cpriv->dev->if_drv_flags & IFF_DRV_RUNNING) != 0)
				ipoib_stop(cpriv);
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}
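/*
 * Two MTU regimes apply below.  In datagram (UD) mode the MTU is
 * bounded by the IB link MTU (IPOIB_UD_MTU() accounts for the 4-byte
 * IPoIB encapsulation header) and is further clamped to the broadcast
 * group's mcast_mtu.  In connected (CM) mode only the IPOIB_CM_MTU()
 * limit applies, but multicast still travels over UD, hence the
 * warning when the administrative MTU exceeds mcast_mtu.
 */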
int
ipoib_change_mtu(struct ipoib_dev_priv *priv, int new_mtu)
{
	struct ifnet *dev = priv->dev;

	/* dev->if_mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(priv)) {
		if (new_mtu > IPOIB_CM_MTU(ipoib_cm_max_mtu(priv)))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->if_mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->if_mtu = min(priv->mcast_mtu, priv->admin_mtu);

	queue_work(ipoib_workqueue, &priv->flush_light);

	return 0;
}

static int
ipoib_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ipoib_dev_priv *priv = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;

	/* check if detaching */
	if (priv == NULL || priv->gone != 0)
		return (ENXIO);

	switch (command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				error = -ipoib_open(priv);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ipoib_stop(priv);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			queue_work(ipoib_workqueue, &priv->restart_task);
		break;
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ifp->if_init(ifp->if_softc);	/* before arpwhohas */
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			ifp->if_init(ifp->if_softc);
			break;
		}
		break;

	case SIOCGIFADDR:
		{
			struct sockaddr *sa;

			sa = (struct sockaddr *) &ifr->ifr_data;
			bcopy(IF_LLADDR(ifp),
			      (caddr_t) sa->sa_data, INFINIBAND_ALEN);
		}
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		error = -ipoib_change_mtu(priv, ifr->ifr_mtu);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static struct ipoib_path *
__path_find(struct ipoib_dev_priv *priv, void *gid)
{
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int
__path_add(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

void
ipoib_path_free(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{

	_IF_DRAIN(&path->queue);

	if (path->ah)
		ipoib_put_ah(path->ah);
	if (ipoib_cm_get(path))
		ipoib_cm_destroy_tx(ipoib_cm_get(path));

	kfree(path);
}
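/*
 * The path cache lives in two structures at once: a red-black tree
 * (priv->path_tree) keyed by the raw 16-byte destination GID via
 * memcmp(), giving O(log n) lookup on the transmit path, and a linear
 * list (priv->path_list) so invalidation, flushing and the debug
 * iterator below can visit every entry without tree-walk state.
 */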
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *
ipoib_path_iter_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->priv = priv;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int
ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = iter->priv;
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void
ipoib_path_iter_read(struct ipoib_path_iter *iter, struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void
ipoib_mark_paths_invalid(struct ipoib_dev_priv *priv)
{
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %16D invalid\n",
			be16_to_cpu(path->pathrec.dlid),
			path->pathrec.dgid.raw, ":");
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}

void
ipoib_flush_paths(struct ipoib_dev_priv *priv)
{
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		wait_for_completion(&path->done);
		ipoib_path_free(priv, path);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
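/*
 * Note the teardown protocol in ipoib_flush_paths() above: any path
 * with an outstanding SA query is cancelled, then the lock is dropped
 * around wait_for_completion() so that path_rec_completion(), which
 * takes the same lock and signals path->done, can run.  Freeing the
 * path before its completion fires would hand the SA callback a
 * dangling pointer.
 */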
static void
path_rec_completion(int status, struct ib_sa_path_rec *pathrec, void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct ipoib_dev_priv *priv = path->priv;
	struct ifnet *dev = priv->dev;
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ifqueue mbqueue;
	struct mbuf *mb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %16D\n",
			  be16_to_cpu(pathrec->dlid), pathrec->dgid.raw, ":");
	else
		ipoib_dbg(priv, "PathRec status %d for GID %16D\n",
			  status, path->pathrec.dgid.raw, ":");

	bzero(&mbqueue, sizeof(mbqueue));

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(priv, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (ah) {
		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		for (;;) {
			_IF_DEQUEUE(&path->queue, mb);
			if (mb == NULL)
				break;
			_IF_ENQUEUE(&mbqueue, mb);
		}

#ifdef CONFIG_INFINIBAND_IPOIB_CM
		if (ipoib_cm_enabled(priv, path->hwaddr) && !ipoib_cm_get(path))
			ipoib_cm_set(path, ipoib_cm_create_tx(priv, path));
#endif

		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (old_ah)
		ipoib_put_ah(old_ah);

	for (;;) {
		_IF_DEQUEUE(&mbqueue, mb);
		if (mb == NULL)
			break;
		mb->m_pkthdr.rcvif = dev;
		if (dev->if_transmit(dev, mb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

static struct ipoib_path *
path_rec_create(struct ipoib_dev_priv *priv, uint8_t *hwaddr)
{
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->priv = priv;

	bzero(&path->queue, sizeof(path->queue));

#ifdef CONFIG_INFINIBAND_IPOIB_CM
	memcpy(&path->hwaddr, hwaddr, INFINIBAND_ALEN);
#endif
	memcpy(path->pathrec.dgid.raw, &hwaddr[4], sizeof (union ib_gid));
	path->pathrec.sgid	    = priv->local_gid;
	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}

static int
path_rec_start(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{
	struct ifnet *dev = priv->dev;

	ib_sa_comp_mask comp_mask = IB_SA_PATH_REC_MTU_SELECTOR | IB_SA_PATH_REC_MTU;
	struct ib_sa_path_rec p_rec;

	p_rec = path->pathrec;
	p_rec.mtu_selector = IB_SA_GT;

	switch (roundup_pow_of_two(dev->if_mtu + IPOIB_ENCAP_LEN)) {
	case 512:
		p_rec.mtu = IB_MTU_256;
		break;
	case 1024:
		p_rec.mtu = IB_MTU_512;
		break;
	case 2048:
		p_rec.mtu = IB_MTU_1024;
		break;
	case 4096:
		p_rec.mtu = IB_MTU_2048;
		break;
	default:
		/* Wildcard everything */
		comp_mask = 0;
		p_rec.mtu = 0;
		p_rec.mtu_selector = 0;
	}

	ipoib_dbg(priv, "Start path record lookup for %16D MTU > %d\n",
		  p_rec.dgid.raw, ":",
		  comp_mask ? ib_mtu_enum_to_int(p_rec.mtu) : 0);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &p_rec, comp_mask		|
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}
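/*
 * The MTU switch in path_rec_start() turns the interface MTU into an
 * SA path-record constraint.  IB_SA_GT selects paths whose MTU enum is
 * strictly greater than the value given, so asking for IB_MTU_1024
 * really means "a path of at least 2048 bytes".  Worked example:
 * if_mtu = 2044 plus the 4-byte encapsulation rounds to 2048, which
 * requests MTU > IB_MTU_1024; anything rounding outside 512..4096
 * wildcards the MTU entirely.
 */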
static void
ipoib_unicast_send(struct mbuf *mb, struct ipoib_dev_priv *priv, struct ipoib_header *eh)
{
	struct ipoib_path *path;

	path = __path_find(priv, eh->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(priv, eh->hwaddr);
			new_path = 1;
		}
		if (path) {
			if (_IF_QLEN(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE)
				_IF_ENQUEUE(&path->queue, mb);
			else {
				if_inc_counter(priv->dev, IFCOUNTER_OERRORS, 1);
				m_freem(mb);
			}

			if (!path->query && path_rec_start(priv, path)) {
				/*
				 * The caller (_ipoib_start()) holds
				 * priv->lock across this call, so just
				 * free the unstarted path; do not drop
				 * the lock here.
				 */
				if (new_path)
					ipoib_path_free(priv, path);
				return;
			} else
				__path_add(priv, path);
		} else {
			if_inc_counter(priv->dev, IFCOUNTER_OERRORS, 1);
			m_freem(mb);
		}

		return;
	}

	if (ipoib_cm_get(path) && ipoib_cm_up(path)) {
		ipoib_cm_send(priv, mb, ipoib_cm_get(path));
	} else if (path->ah) {
		ipoib_send(priv, mb, path->ah, IPOIB_QPN(eh->hwaddr));
	} else if ((path->query || !path_rec_start(priv, path)) &&
		    path->queue.ifq_len < IPOIB_MAX_PATH_REC_QUEUE) {
		_IF_ENQUEUE(&path->queue, mb);
	} else {
		if_inc_counter(priv->dev, IFCOUNTER_OERRORS, 1);
		m_freem(mb);
	}
}

static int
ipoib_send_one(struct ipoib_dev_priv *priv, struct mbuf *mb)
{
	struct ipoib_header *eh;

	eh = mtod(mb, struct ipoib_header *);
	if (IPOIB_IS_MULTICAST(eh->hwaddr)) {
		/* Add in the P_Key for multicast */
		eh->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		eh->hwaddr[9] = priv->pkey & 0xff;

		ipoib_mcast_send(priv, eh->hwaddr + 4, mb);
	} else
		ipoib_unicast_send(mb, priv, eh);

	return 0;
}

static void
_ipoib_start(struct ifnet *dev, struct ipoib_dev_priv *priv)
{
	struct mbuf *mb;

	if ((dev->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	spin_lock(&priv->lock);
	while (!IFQ_DRV_IS_EMPTY(&dev->if_snd) &&
	    (dev->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
		IFQ_DRV_DEQUEUE(&dev->if_snd, mb);
		if (mb == NULL)
			break;
		IPOIB_MTAP(dev, mb);
		ipoib_send_one(priv, mb);
	}
	spin_unlock(&priv->lock);
}

static void
ipoib_start(struct ifnet *dev)
{
	_ipoib_start(dev, dev->if_softc);
}

static void
ipoib_vlan_start(struct ifnet *dev)
{
	struct ipoib_dev_priv *priv;
	struct mbuf *mb;

	priv = VLAN_COOKIE(dev);
	if (priv != NULL)
		return _ipoib_start(dev, priv);
	while (!IFQ_DRV_IS_EMPTY(&dev->if_snd)) {
		IFQ_DRV_DEQUEUE(&dev->if_snd, mb);
		if (mb == NULL)
			break;
		m_freem(mb);
		if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
	}
}
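/*
 * The byte offsets used above come from the 20-byte IPoIB hardware
 * address format (RFC 4391): bytes 0-3 carry flags plus the queue pair
 * number (hence IPOIB_QPN(hwaddr) and the "+ 4" to reach the GID) and
 * bytes 4-19 carry the 16-byte GID.  For multicast the GID is a mapped
 * MGID whose bytes 4-5 (bytes 8-9 of the full address) hold the P_Key,
 * which is why ipoib_send_one() stamps priv->pkey there before calling
 * ipoib_mcast_send().
 */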
int
ipoib_dev_init(struct ipoib_dev_priv *priv, struct ib_device *ca, int port)
{

	/* Allocate RX/TX "rings" to hold queued mbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(priv, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	kfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

static void
ipoib_detach(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev;

	dev = priv->dev;
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		priv->gone = 1;
		bpfdetach(dev);
		if_detach(dev);
		if_free(dev);
	} else
		VLAN_SETCOOKIE(priv->dev, NULL);

	free(priv, M_TEMP);
}

void
ipoib_dev_cleanup(struct ipoib_dev_priv *priv)
{
	struct ipoib_dev_priv *cpriv, *tcpriv;

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		ipoib_dev_cleanup(cpriv);
		ipoib_detach(cpriv);
	}

	ipoib_ib_dev_cleanup(priv);

	kfree(priv->rx_ring);
	kfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static volatile int ipoib_unit;

static struct ipoib_dev_priv *
ipoib_priv_alloc(void)
{
	struct ipoib_dev_priv *priv;

	priv = malloc(sizeof(struct ipoib_dev_priv), M_TEMP, M_ZERO|M_WAITOK);
	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->drain_lock);
	mutex_init(&priv->vlan_mutex);
	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);
	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	memcpy(priv->broadcastaddr, ipv4_bcast_addr, INFINIBAND_ALEN);

	return (priv);
}
struct ipoib_dev_priv *
ipoib_intf_alloc(const char *name)
{
	struct ipoib_dev_priv *priv;
	struct sockaddr_dl *sdl;
	struct ifnet *dev;

	priv = ipoib_priv_alloc();
	dev = priv->dev = if_alloc(IFT_INFINIBAND);
	if (!dev) {
		free(priv, M_TEMP);
		return NULL;
	}
	dev->if_softc = priv;
	if_initname(dev, name, atomic_fetchadd_int(&ipoib_unit, 1));
	dev->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	dev->if_addrlen = INFINIBAND_ALEN;
	dev->if_hdrlen = IPOIB_HEADER_LEN;
	if_attach(dev);
	dev->if_init = ipoib_init;
	dev->if_ioctl = ipoib_ioctl;
	dev->if_start = ipoib_start;
	dev->if_output = ipoib_output;
	dev->if_input = ipoib_input;
	dev->if_resolvemulti = ipoib_resolvemulti;
	dev->if_baudrate = IF_Gbps(10);
	dev->if_broadcastaddr = priv->broadcastaddr;
	dev->if_snd.ifq_maxlen = ipoib_sendq_size * 2;
	sdl = (struct sockaddr_dl *)dev->if_addr->ifa_addr;
	sdl->sdl_type = IFT_INFINIBAND;
	sdl->sdl_alen = dev->if_addrlen;
	if_link_state_change(dev, LINK_STATE_DOWN);
	bpfattach(dev, DLT_EN10MB, ETHER_HDR_LEN);

	return dev->if_softc;
}

int
ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	struct ib_device_attr *device_attr;
	int result = -ENOMEM;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr) {
		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
		       hca->name, sizeof *device_attr);
		return result;
	}

	result = ib_query_device(hca, device_attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
		       hca->name, result);
		kfree(device_attr);
		return result;
	}
	priv->hca_caps = device_attr->device_cap_flags;

	kfree(device_attr);

	priv->dev->if_hwassist = 0;
	priv->dev->if_capabilities = 0;

#ifndef CONFIG_INFINIBAND_IPOIB_CM
	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
		priv->dev->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
		priv->dev->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
	}

#if 0
	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO) {
		priv->dev->if_capabilities |= IFCAP_TSO4;
		priv->dev->if_hwassist |= CSUM_TSO;
	}
#endif
#endif
	priv->dev->if_capabilities |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_LINKSTATE;
	priv->dev->if_capenable = priv->dev->if_capabilities;

	return 0;
}
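/*
 * Checksum offload is only advertised when the HCA reports
 * IB_DEVICE_UD_IP_CSUM and connected mode is compiled out; in
 * connected mode traffic moves over a reliable connection where the
 * UD checksum offload does not apply.  The TSO hook stays under
 * "#if 0" in this port.
 */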
static struct ifnet *
ipoib_add_port(const char *format, struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	if (!ib_query_port(hca, port, &attr))
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		       hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->if_mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->if_mtu;

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	if (ipoib_set_dev_features(priv, hca))
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->broadcastaddr[8] = priv->pkey >> 8;
	priv->broadcastaddr[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}
	memcpy(IF_LLADDR(priv->dev) + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}
	if (ipoib_cm_admin_enabled(priv))
		priv->dev->if_mtu = IPOIB_CM_MTU(ipoib_cm_max_mtu(priv));

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}
	if_printf(priv->dev, "Attached to %s port %d\n", hca->name, port);

	return priv->dev;

event_failed:
	ipoib_dev_cleanup(priv);

device_init_failed:
	ipoib_detach(priv);

alloc_mem_failed:
	return ERR_PTR(result);
}

static void
ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct ifnet *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
			continue;
		dev = ipoib_add_port("ib", device, p);
		if (!IS_ERR(dev)) {
			priv = dev->if_softc;
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

static void
ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		if (rdma_port_get_link_layer(device, priv->port) != IB_LINK_LAYER_INFINIBAND)
			continue;

		ipoib_stop(priv);

		ib_unregister_event_handler(&priv->event_handler);

		/* dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); */

		flush_workqueue(ipoib_workqueue);

		ipoib_dev_cleanup(priv);
		ipoib_detach(priv);
	}

	kfree(dev_list);
}
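/*
 * Child interfaces are created through the vlan(4) machinery: the
 * 802.1Q tag is reinterpreted as an IB partition key.  The full
 * membership bit (0x8000) is forced on below, so only 15 bits of
 * P_Key are available and a vtag with the high bit already set is
 * rejected.  As an illustrative (hypothetical) invocation, something
 * like
 *
 *	ifconfig vlan0 create vlan 100 vlandev ib0
 *
 * would yield a child interface on P_Key 0x8064 (100 | 0x8000).
 */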
static void
ipoib_config_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct ipoib_dev_priv *parent;
	struct ipoib_dev_priv *priv;
	struct ifnet *dev;
	uint16_t pkey;
	int error;

	if (ifp->if_type != IFT_INFINIBAND)
		return;
	dev = VLAN_DEVAT(ifp, vtag);
	if (dev == NULL)
		return;
	priv = NULL;
	error = 0;
	parent = ifp->if_softc;
	/* We only support 15 bits of pkey. */
	if (vtag & 0x8000)
		return;
	pkey = vtag | 0x8000;	/* Set full membership bit. */
	if (pkey == parent->pkey)
		return;
	/* Check for dups */
	mutex_lock(&parent->vlan_mutex);
	list_for_each_entry(priv, &parent->child_intfs, list) {
		if (priv->pkey == pkey) {
			priv = NULL;
			error = EBUSY;
			goto out;
		}
	}
	priv = ipoib_priv_alloc();
	priv->dev = dev;
	priv->max_ib_mtu = parent->max_ib_mtu;
	priv->mcast_mtu = priv->admin_mtu = parent->dev->if_mtu;
	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
	error = ipoib_set_dev_features(priv, parent->ca);
	if (error)
		goto out;
	priv->pkey = pkey;
	priv->broadcastaddr[8] = pkey >> 8;
	priv->broadcastaddr[9] = pkey & 0xff;
	dev->if_broadcastaddr = priv->broadcastaddr;
	error = ipoib_dev_init(priv, parent->ca, parent->port);
	if (error)
		goto out;
	priv->parent = parent->dev;
	list_add_tail(&priv->list, &parent->child_intfs);
	VLAN_SETCOOKIE(dev, priv);
	dev->if_start = ipoib_vlan_start;
	dev->if_drv_flags &= ~IFF_DRV_RUNNING;
	dev->if_hdrlen = IPOIB_HEADER_LEN;
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ipoib_open(priv);
	mutex_unlock(&parent->vlan_mutex);
	return;
out:
	mutex_unlock(&parent->vlan_mutex);
	if (priv)
		free(priv, M_TEMP);
	if (error)
		ipoib_warn(parent,
		    "failed to initialize subinterface: device %s, port %d vtag 0x%X",
		    parent->ca->name, parent->port, vtag);
	return;
}

static void
ipoib_unconfig_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct ipoib_dev_priv *parent;
	struct ipoib_dev_priv *priv;
	struct ifnet *dev;
	uint16_t pkey;

	if (ifp->if_type != IFT_INFINIBAND)
		return;

	dev = VLAN_DEVAT(ifp, vtag);
	if (dev)
		VLAN_SETCOOKIE(dev, NULL);
	pkey = vtag | 0x8000;
	parent = ifp->if_softc;
	mutex_lock(&parent->vlan_mutex);
	list_for_each_entry(priv, &parent->child_intfs, list) {
		if (priv->pkey == pkey) {
			ipoib_dev_cleanup(priv);
			list_del(&priv->list);
			break;
		}
	}
	mutex_unlock(&parent->vlan_mutex);
}

eventhandler_tag ipoib_vlan_attach;
eventhandler_tag ipoib_vlan_detach;
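/*
 * Module load normalizes the ring sizes before any device attaches:
 * each is rounded up to a power of two and clamped to the
 * [IPOIB_MIN_QUEUE_SIZE, IPOIB_MAX_QUEUE_SIZE] range, with the send
 * ring additionally held at no less than 2 * MAX_SEND_CQE.  For
 * example, loading with send_queue_size=100 produces a 128-entry ring
 * (assuming the limits permit).
 */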
static int __init
ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
						     IPOIB_MIN_QUEUE_SIZE));
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	ipoib_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		ipoib_config_vlan, NULL, EVENTHANDLER_PRI_FIRST);
	ipoib_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		ipoib_unconfig_vlan, NULL, EVENTHANDLER_PRI_FIRST);

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	return 0;

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	return ret;
}

static void __exit
ipoib_cleanup_module(void)
{

	EVENTHANDLER_DEREGISTER(vlan_config, ipoib_vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, ipoib_vlan_detach);
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);
}
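/*
 * ipoib_output() below follows the shape of ether_output(): resolve
 * the 20-byte link-layer destination (ARP/ND6 or a multicast mapping),
 * prepend a local struct ipoib_header holding that address plus the
 * 16-bit ethertype, and hand the mbuf to if_transmit.  The full
 * pseudo-header exists only between the stack, BPF and the driver;
 * per RFC 4391 just a 4-byte encapsulation header (ethertype plus
 * reserved bytes) precedes the payload on the wire.
 */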
/*
 * Infiniband output routine.
 */
static int
ipoib_output(struct ifnet *ifp, struct mbuf *m,
	const struct sockaddr *dst, struct route *ro)
{
	u_char edst[INFINIBAND_ALEN];
#if defined(INET) || defined(INET6)
	struct llentry *lle = NULL;
#endif
	struct ipoib_header *eh;
	int error = 0, is_gw = 0;
	short type;

	if (ro != NULL)
		is_gw = (ro->ro_flags & RT_HAS_GW) != 0;
#ifdef MAC
	error = mac_ifnet_check_transmit(ifp, m);
	if (error)
		goto bad;
#endif

	M_PROFILE(m);
	if (ifp->if_flags & IFF_MONITOR) {
		error = ENETDOWN;
		goto bad;
	}
	if (!((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		error = ENETDOWN;
		goto bad;
	}

	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		if (lle != NULL && (lle->la_flags & LLE_VALID))
			memcpy(edst, lle->ll_addr, sizeof(edst));
		else if (m->m_flags & M_MCAST)
			ip_ib_mc_map(((struct sockaddr_in *)dst)->sin_addr.s_addr,
			    ifp->if_broadcastaddr, edst);
		else
			error = arpresolve(ifp, is_gw, m, dst, edst, NULL, NULL);
		if (error)
			return (error == EWOULDBLOCK ? 0 : error);
		type = htons(ETHERTYPE_IP);
		break;
	case AF_ARP:
	{
		struct arphdr *ah;
		ah = mtod(m, struct arphdr *);
		ah->ar_hrd = htons(ARPHRD_INFINIBAND);

		switch(ntohs(ah->ar_op)) {
		case ARPOP_REVREQUEST:
		case ARPOP_REVREPLY:
			type = htons(ETHERTYPE_REVARP);
			break;
		case ARPOP_REQUEST:
		case ARPOP_REPLY:
		default:
			type = htons(ETHERTYPE_ARP);
			break;
		}

		if (m->m_flags & M_BCAST)
			bcopy(ifp->if_broadcastaddr, edst, INFINIBAND_ALEN);
		else
			bcopy(ar_tha(ah), edst, INFINIBAND_ALEN);

	}
	break;
#endif
#ifdef INET6
	case AF_INET6:
		if (lle != NULL && (lle->la_flags & LLE_VALID))
			memcpy(edst, lle->ll_addr, sizeof(edst));
		else if (m->m_flags & M_MCAST)
			ipv6_ib_mc_map(&((struct sockaddr_in6 *)dst)->sin6_addr,
			    ifp->if_broadcastaddr, edst);
		else
			error = nd6_resolve(ifp, is_gw, m, dst, edst, NULL, NULL);
		if (error)
			return error;
		type = htons(ETHERTYPE_IPV6);
		break;
#endif

	default:
		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/*
	 * Add local net header.  If no space in first mbuf,
	 * allocate another.
	 */
	M_PREPEND(m, IPOIB_HEADER_LEN, M_NOWAIT);
	if (m == NULL) {
		error = ENOBUFS;
		goto bad;
	}
	eh = mtod(m, struct ipoib_header *);
	(void)memcpy(&eh->proto, &type, sizeof(eh->proto));
	(void)memcpy(&eh->hwaddr, edst, sizeof (edst));

	/*
	 * Queue message on interface, update output statistics if
	 * successful, and start output if interface not yet active.
	 */
	return ((ifp->if_transmit)(ifp, m));
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}

/*
 * Upper layer processing for a received Infiniband packet.
 */
void
ipoib_demux(struct ifnet *ifp, struct mbuf *m, u_short proto)
{
	int isr;

#ifdef MAC
	/*
	 * Tag the mbuf with an appropriate MAC label before any other
	 * consumers can get to it.
	 */
	mac_ifnet_create_mbuf(ifp, m);
#endif
	/* Allow monitor mode to claim this frame, after stats are updated. */
	if (ifp->if_flags & IFF_MONITOR) {
		if_printf(ifp, "discard frame at IFF_MONITOR\n");
		m_freem(m);
		return;
	}
	/*
	 * Dispatch frame to upper layer.
	 */
	switch (proto) {
#ifdef INET
	case ETHERTYPE_IP:
		isr = NETISR_IP;
		break;

	case ETHERTYPE_ARP:
		if (ifp->if_flags & IFF_NOARP) {
			/* Discard packet if ARP is disabled on interface */
			m_freem(m);
			return;
		}
		isr = NETISR_ARP;
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		isr = NETISR_IPV6;
		break;
#endif
	default:
		goto discard;
	}
	netisr_dispatch(isr, m);
	return;

discard:
	m_freem(m);
}

/*
 * Process a received Infiniband packet.
 */
static void
ipoib_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ipoib_header *eh;

	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		return;
	}
	CURVNET_SET_QUIET(ifp->if_vnet);

	/* Let BPF have it before we strip the header. */
	IPOIB_MTAP(ifp, m);
	eh = mtod(m, struct ipoib_header *);
	/*
	 * Reset layer specific mbuf flags to avoid confusing upper layers.
	 * Strip off Infiniband header.
	 */
	m->m_flags &= ~M_VLANTAG;
	m_clrprotoflags(m);
	m_adj(m, IPOIB_HEADER_LEN);

	if (IPOIB_IS_MULTICAST(eh->hwaddr)) {
		if (memcmp(eh->hwaddr, ifp->if_broadcastaddr,
		    ifp->if_addrlen) == 0)
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;
		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
	}

	ipoib_demux(ifp, m, ntohs(eh->proto));
	CURVNET_RESTORE();
}
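/*
 * ipoib_resolvemulti() below backs the if_resolvemulti hook: it
 * validates a requested multicast membership and, for AF_INET and
 * AF_INET6, builds the 20-byte link-layer group address.
 * ip_ib_mc_map() and ipv6_ib_mc_map() embed the IP group address in a
 * mapped MGID while inheriting the P_Key and scope bits from
 * if_broadcastaddr, following the RFC 4391 mapping rules.
 */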
static int
ipoib_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
	struct sockaddr *sa)
{
	struct sockaddr_dl *sdl;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif
	u_char *e_addr;

	switch(sa->sa_family) {
	case AF_LINK:
		/*
		 * No mapping needed. Just check that it's a valid MC address.
		 */
		sdl = (struct sockaddr_dl *)sa;
		e_addr = LLADDR(sdl);
		if (!IPOIB_IS_MULTICAST(e_addr))
			return EADDRNOTAVAIL;
		*llsa = NULL;
		return 0;

#ifdef INET
	case AF_INET:
		sin = (struct sockaddr_in *)sa;
		if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
			return EADDRNOTAVAIL;
		sdl = link_init_sdl(ifp, *llsa, IFT_INFINIBAND);
		sdl->sdl_alen = INFINIBAND_ALEN;
		e_addr = LLADDR(sdl);
		ip_ib_mc_map(sin->sin_addr.s_addr, ifp->if_broadcastaddr,
		    e_addr);
		*llsa = (struct sockaddr *)sdl;
		return 0;
#endif
#ifdef INET6
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *)sa;
		/*
		 * An IP6 address of 0 means listen to all
		 * of the multicast address used for IP6.
		 * This has no meaning in ipoib.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
			return EADDRNOTAVAIL;
		if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
			return EADDRNOTAVAIL;
		sdl = link_init_sdl(ifp, *llsa, IFT_INFINIBAND);
		sdl->sdl_alen = INFINIBAND_ALEN;
		e_addr = LLADDR(sdl);
		ipv6_ib_mc_map(&sin6->sin6_addr, ifp->if_broadcastaddr, e_addr);
		*llsa = (struct sockaddr *)sdl;
		return 0;
#endif

	default:
		return EAFNOSUPPORT;
	}
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);

static int
ipoib_evhand(module_t mod, int event, void *arg)
{
	return (0);
}

static moduledata_t ipoib_mod = {
	.name = "ipoib",
	.evhand = ipoib_evhand,
};

DECLARE_MODULE(ipoib, ipoib_mod, SI_SUB_LAST, SI_ORDER_ANY);
MODULE_DEPEND(ipoib, ibcore, 1, 1, 1);
MODULE_DEPEND(ipoib, linuxkpi, 1, 1, 1);