/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_main.c 1377 2004-12-23 19:57:12Z roland $
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kernel.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <net/dst.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};

int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(dev))
		return 0;

	if (ipoib_ib_dev_open(dev))
		return -EINVAL;

	if (ipoib_ib_dev_up(dev)) {
		ipoib_ib_dev_stop(dev);
		return -EINVAL;
	}

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	netif_start_queue(dev);

	return 0;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	/*
	 * Now flush workqueue to make sure a scheduled task doesn't
	 * bring our internal state back up.
	 */
	flush_workqueue(ipoib_workqueue);

	ipoib_ib_dev_down(dev, 1);
	ipoib_ib_dev_stop(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}

static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that path->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);

		ipoib_neigh_free(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);

	spin_lock_irq(&priv->lock);

	list_splice(&priv->path_list, &remove_list);
	INIT_LIST_HEAD(&priv->path_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irq(&priv->lock);
		wait_for_completion(&path->done);
		path_free(dev, path);
		spin_lock_irq(&priv->lock);
	}
	spin_unlock_irq(&priv->lock);
}

static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_neigh *neigh;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (pathrec)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
			  be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
	else
		ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n",
			  status, IPOIB_GID_ARG(path->pathrec.dgid));

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av = {
			.dlid	     = be16_to_cpu(pathrec->dlid),
			.sl	     = pathrec->sl,
			.port_num    = priv->port,
			.static_rate = pathrec->rate
		};

		ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	path->ah = ah;

	if (ah) {
		path->pathrec = *pathrec;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

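		/*
		 * The path is now usable: collect the packets that were
		 * queued on the path itself and on each neighbour waiting
		 * for it, so they can be handed to dev_queue_xmit() once
		 * priv->lock has been dropped below.
		 */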
		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry(neigh, &path->neigh_list, list) {
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
	path->pathrec.sgid = priv->local_gid;
	path->pathrec.pkey = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path = 1;

	return path;
}

static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
		  IPOIB_GID_ARG(path->pathrec.dgid));

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID |
				   IB_SA_PATH_REC_SGID |
				   IB_SA_PATH_REC_NUMB_PATH |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed\n");
		path->query = NULL;
		return path->query_id;
	}

	return 0;
}

static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;

	neigh = ipoib_neigh_alloc(skb->dst->neighbour);
	if (!neigh) {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	skb_queue_head_init(&neigh->queue);

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	path = __path_find(dev, skb->dst->neighbour->ha + 4);
	if (!path) {
		path = path_rec_create(dev, skb->dst->neighbour->ha + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		ipoib_send(dev, skb, path->ah,
			   be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
	} else {
		neigh->ah = NULL;
		__skb_queue_tail(&neigh->queue, skb);

		if (!path->query && path_rec_start(dev, path))
			goto err_list;
	}

	spin_unlock(&priv->lock);
	return;

err_list:
	list_del(&neigh->list);

err_path:
	ipoib_neigh_free(neigh);
	++priv->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock(&priv->lock);
}

static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

	/* Look up path record for unicasts */
	if (skb->dst->neighbour->ha[4] != 0xff) {
		neigh_add_path(skb, dev);
		return;
	}

	/* Add in the P_Key for multicasts */
	skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
	skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
	ipoib_mcast_send(dev, skb->dst->neighbour->ha + 4, skb);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudoheader *phdr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	path = __path_find(dev, phdr->hwaddr + 4);
	if (!path) {
		path = path_rec_create(dev, phdr->hwaddr + 4);
		if (path) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof *phdr);
			__skb_queue_tail(&path->queue, skb);

			if (path_rec_start(dev, path)) {
				spin_unlock(&priv->lock);
				path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock(&priv->lock);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		ipoib_send(dev, skb, path->ah,
			   be32_to_cpup((__be32 *) phdr->hwaddr));
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, sizeof *phdr);
		__skb_queue_tail(&path->queue, skb);
	} else {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock(&priv->lock);
}

static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	unsigned long flags;

	if (!spin_trylock_irqsave(&priv->tx_lock, flags))
		return NETDEV_TX_LOCKED;

	/*
	 * Check if our queue is stopped.  Since we have the LLTX bit
	 * set, we can't rely on netif_stop_queue() preventing our
	 * xmit function from being called with a full queue.
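	 * (The queue is stopped when the send ring fills up and woken
	 * again from the send-completion handling in ipoib_ib.c.)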
	 */
	if (unlikely(netif_queue_stopped(dev))) {
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	if (skb->dst && skb->dst->neighbour) {
		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
			ipoib_path_lookup(skb, dev);
			goto out;
		}

		neigh = *to_ipoib_neigh(skb->dst->neighbour);

		if (likely(neigh->ah)) {
			ipoib_send(dev, skb, neigh->ah,
				   be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
			goto out;
		}

		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			spin_lock(&priv->lock);
			__skb_queue_tail(&neigh->queue, skb);
			spin_unlock(&priv->lock);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}
	} else {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb->data;
		skb_pull(skb, sizeof *phdr);

		if (phdr->hwaddr[4] == 0xff) {
			/* Add in the P_Key for multicast */
			phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
			phdr->hwaddr[9] = priv->pkey & 0xff;

			ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
		} else {
			/* unicast GID -- should be ARP or RARP reply */

			if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
			    (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
				ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
					   IPOIB_GID_FMT "\n",
					   skb->dst ? "neigh" : "dst",
					   be16_to_cpup((__be16 *) skb->data),
					   be32_to_cpup((__be32 *) phdr->hwaddr),
					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
				dev_kfree_skb_any(skb);
				++priv->stats.tx_dropped;
				goto out;
			}

			unicast_arp_send(skb, dev, phdr);
		}
	}

out:
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return NETDEV_TX_OK;
}

static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	return &priv->stats;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev->trans_start));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     void *daddr, void *saddr, unsigned len)
{
	struct ipoib_header *header;

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * If we don't have a neighbour structure, stuff the
	 * destination address onto the front of the skb so we can
	 * figure out where to send the packet later.
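	 * (ipoib_start_xmit() pulls this pseudoheader back off before it
	 * looks at the payload; unicast_arp_send() pushes it on again if
	 * the packet has to be queued for a path record lookup.)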
	 */
	if ((!skb->dst || !skb->dst->neighbour) && daddr) {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
		memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
	}

	return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(ipoib_workqueue, &priv->restart_task);
}

static void ipoib_neigh_destructor(struct neighbour *n)
{
	struct ipoib_neigh *neigh;
	struct ipoib_dev_priv *priv = netdev_priv(n->dev);
	unsigned long flags;
	struct ipoib_ah *ah = NULL;

	ipoib_dbg(priv,
		  "neigh_destructor for %06x " IPOIB_GID_FMT "\n",
		  be32_to_cpup((__be32 *) n->ha),
		  IPOIB_GID_RAW_ARG(n->ha + 4));

	spin_lock_irqsave(&priv->lock, flags);

	neigh = *to_ipoib_neigh(n);
	if (neigh) {
		if (neigh->ah)
			ah = neigh->ah;
		list_del(&neigh->list);
		ipoib_neigh_free(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ah)
		ipoib_put_ah(ah);
}

struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
{
	struct ipoib_neigh *neigh;

	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->neighbour = neighbour;
	*to_ipoib_neigh(neighbour) = neigh;

	return neigh;
}

void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
	*to_ipoib_neigh(neigh->neighbour) = NULL;
	kfree(neigh);
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
	parms->neigh_destructor = ipoib_neigh_destructor;

	return 0;
}

int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head & tx_tail are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	kfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		unregister_netdev(cpriv->dev);
		ipoib_dev_cleanup(cpriv->dev);
		free_netdev(cpriv->dev);
	}

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	kfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->open = ipoib_open;
	dev->stop = ipoib_stop;
	dev->change_mtu = ipoib_change_mtu;
	dev->hard_start_xmit = ipoib_start_xmit;
	dev->get_stats = ipoib_get_stats;
	dev->tx_timeout = ipoib_timeout;
	dev->hard_header = ipoib_hard_header;
	dev->set_multicast_list = ipoib_set_mcast_list;
	dev->neigh_setup = ipoib_neigh_setup_dev;

	dev->watchdog_timeo = HZ;

	dev->flags |= IFF_BROADCAST | IFF_MULTICAST;

	/*
	 * We add in INFINIBAND_ALEN to allow for the destination
	 * address "pseudoheader" for skbs without neighbour struct.
	 */
	dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
	dev->addr_len = INFINIBAND_ALEN;
	dev->type = ARPHRD_INFINIBAND;
	dev->tx_queue_len = ipoib_sendq_size * 2;
	dev->features = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;

	/* MTU will be reset when mcast join happens */
	dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
	priv->mcast_mtu = priv->admin_mtu = dev->mtu;

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	netif_carrier_off(dev);

	SET_MODULE_OWNER(dev);

	priv->dev = dev;

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->tx_lock);

	mutex_init(&priv->mcast_mutex);
	mutex_init(&priv->vlan_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_WORK(&priv->pkey_task,    ipoib_pkey_poll,          priv->dev);
	INIT_WORK(&priv->mcast_task,   ipoib_mcast_join_task,    priv->dev);
	INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush,       priv->dev);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
	INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah,            priv->dev);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
			   ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}

static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
	struct ipoib_dev_priv *priv =
		netdev_priv(container_of(cdev, struct net_device, class_dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t create_child(struct class_device *cdev,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev),
			     pkey);

	return ret ? ret : count;
}
static CLASS_DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);

static ssize_t delete_child(struct class_device *cdev,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(container_of(cdev, struct net_device, class_dev),
				pkey);

	return ret ? ret : count;
}
static CLASS_DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return class_device_create_file(&dev->class_dev,
					&class_device_attr_pkey);
}

static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto alloc_mem_failed;
	}

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto alloc_mem_failed;
	} else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	ipoib_create_debug_files(priv->dev);

	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (class_device_create_file(&priv->dev->class_dev,
				     &class_device_attr_create_child))
		goto sysfs_failed;
	if (class_device_create_file(&priv->dev->class_dev,
				     &class_device_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_scheduled_work();

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}

static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == IB_NODE_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);
		flush_scheduled_work();

		unregister_netdev(priv->dev);
		ipoib_dev_cleanup(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}

static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max(ipoib_sendq_size, IPOIB_MIN_QUEUE_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_wq;

	return 0;

err_wq:
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
	ib_unregister_client(&ipoib_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);
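/*
 * Note on the 20-byte (INFINIBAND_ALEN) IPoIB hardware address used
 * throughout this file: the first four bytes carry the destination QPN
 * (read as one 32-bit word with be32_to_cpup()), and the remaining
 * sixteen bytes hold the port GID -- hence the "hwaddr + 4" / "ha + 4"
 * offsets and the sizeof (union ib_gid) comparisons above.  A leading
 * GID byte of 0xff (hwaddr[4]) marks a multicast destination, and bytes
 * 8 and 9 (the P_Key field of a multicast GID) are filled in with our
 * P_Key before a multicast send.
 */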