1 /* 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 * 34 * $Id: ipoib_main.c 1377 2004-12-23 19:57:12Z roland $ 35 */ 36 37 #include "ipoib.h" 38 39 #include <linux/module.h> 40 41 #include <linux/init.h> 42 #include <linux/slab.h> 43 #include <linux/vmalloc.h> 44 45 #include <linux/if_arp.h> /* For ARPHRD_xxx */ 46 47 #include <linux/ip.h> 48 #include <linux/in.h> 49 50 #include <net/dst.h> 51 52 MODULE_AUTHOR("Roland Dreier"); 53 MODULE_DESCRIPTION("IP-over-InfiniBand net driver"); 54 MODULE_LICENSE("Dual BSD/GPL"); 55 56 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 57 int ipoib_debug_level; 58 59 module_param_named(debug_level, ipoib_debug_level, int, 0644); 60 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 61 #endif 62 63 struct ipoib_path_iter { 64 struct net_device *dev; 65 struct ipoib_path path; 66 }; 67 68 static const u8 ipv4_bcast_addr[] = { 69 0x00, 0xff, 0xff, 0xff, 70 0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00, 71 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff 72 }; 73 74 struct workqueue_struct *ipoib_workqueue; 75 76 static void ipoib_add_one(struct ib_device *device); 77 static void ipoib_remove_one(struct ib_device *device); 78 79 static struct ib_client ipoib_client = { 80 .name = "ipoib", 81 .add = ipoib_add_one, 82 .remove = ipoib_remove_one 83 }; 84 85 int ipoib_open(struct net_device *dev) 86 { 87 struct ipoib_dev_priv *priv = netdev_priv(dev); 88 89 ipoib_dbg(priv, "bringing up interface\n"); 90 91 set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 92 93 if (ipoib_pkey_dev_delay_open(dev)) 94 return 0; 95 96 if (ipoib_ib_dev_open(dev)) 97 return -EINVAL; 98 99 if (ipoib_ib_dev_up(dev)) { 100 ipoib_ib_dev_stop(dev); 101 return -EINVAL; 102 } 103 104 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 105 struct ipoib_dev_priv *cpriv; 106 107 /* Bring up any child interfaces too */ 108 mutex_lock(&priv->vlan_mutex); 109 list_for_each_entry(cpriv, &priv->child_intfs, list) { 110 int flags; 111 112 flags = cpriv->dev->flags; 113 if (flags & IFF_UP) 114 continue; 115 116 dev_change_flags(cpriv->dev, flags | IFF_UP); 117 } 118 mutex_unlock(&priv->vlan_mutex); 119 } 120 121 netif_start_queue(dev); 122 123 return 0; 124 } 125 126 static int ipoib_stop(struct net_device *dev) 127 { 128 struct ipoib_dev_priv *priv = netdev_priv(dev); 129 130 ipoib_dbg(priv, "stopping interface\n"); 131 132 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 133 134 netif_stop_queue(dev); 135 136 /* 137 * Now flush workqueue to make sure a scheduled task doesn't 138 * bring our internal state back up. 139 */ 140 flush_workqueue(ipoib_workqueue); 141 142 ipoib_ib_dev_down(dev, 1); 143 ipoib_ib_dev_stop(dev); 144 145 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 146 struct ipoib_dev_priv *cpriv; 147 148 /* Bring down any child interfaces too */ 149 mutex_lock(&priv->vlan_mutex); 150 list_for_each_entry(cpriv, &priv->child_intfs, list) { 151 int flags; 152 153 flags = cpriv->dev->flags; 154 if (!(flags & IFF_UP)) 155 continue; 156 157 dev_change_flags(cpriv->dev, flags & ~IFF_UP); 158 } 159 mutex_unlock(&priv->vlan_mutex); 160 } 161 162 return 0; 163 } 164 165 static int ipoib_change_mtu(struct net_device *dev, int new_mtu) 166 { 167 struct ipoib_dev_priv *priv = netdev_priv(dev); 168 169 if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN) 170 return -EINVAL; 171 172 priv->admin_mtu = new_mtu; 173 174 dev->mtu = min(priv->mcast_mtu, priv->admin_mtu); 175 176 return 0; 177 } 178 179 static struct ipoib_path *__path_find(struct net_device *dev, 180 union ib_gid *gid) 181 { 182 struct ipoib_dev_priv *priv = netdev_priv(dev); 183 struct rb_node *n = priv->path_tree.rb_node; 184 struct ipoib_path *path; 185 int ret; 186 187 while (n) { 188 path = rb_entry(n, struct ipoib_path, rb_node); 189 190 ret = memcmp(gid->raw, path->pathrec.dgid.raw, 191 sizeof (union ib_gid)); 192 193 if (ret < 0) 194 n = n->rb_left; 195 else if (ret > 0) 196 n = n->rb_right; 197 else 198 return path; 199 } 200 201 return NULL; 202 } 203 204 static int __path_add(struct net_device *dev, struct ipoib_path *path) 205 { 206 struct ipoib_dev_priv *priv = netdev_priv(dev); 207 struct rb_node **n = &priv->path_tree.rb_node; 208 struct rb_node *pn = NULL; 209 struct ipoib_path *tpath; 210 int ret; 211 212 while (*n) { 213 pn = *n; 214 tpath = rb_entry(pn, struct ipoib_path, rb_node); 215 216 ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw, 217 sizeof (union ib_gid)); 218 if (ret < 0) 219 n = &pn->rb_left; 220 else if (ret > 0) 221 n = &pn->rb_right; 222 else 223 return -EEXIST; 224 } 225 226 rb_link_node(&path->rb_node, pn, n); 227 rb_insert_color(&path->rb_node, &priv->path_tree); 228 229 list_add_tail(&path->list, &priv->path_list); 230 231 return 0; 232 } 233 234 static void path_free(struct net_device *dev, struct ipoib_path *path) 235 { 236 struct ipoib_dev_priv *priv = netdev_priv(dev); 237 struct ipoib_neigh *neigh, *tn; 238 struct sk_buff *skb; 239 unsigned long flags; 240 241 while ((skb = __skb_dequeue(&path->queue))) 242 dev_kfree_skb_irq(skb); 243 244 spin_lock_irqsave(&priv->lock, flags); 245 246 list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) { 247 /* 248 * It's safe to call ipoib_put_ah() inside priv->lock 249 * here, because we know that path->ah will always 250 * hold one more reference, so ipoib_put_ah() will 251 * never do more than decrement the ref count. 252 */ 253 if (neigh->ah) 254 ipoib_put_ah(neigh->ah); 255 *to_ipoib_neigh(neigh->neighbour) = NULL; 256 kfree(neigh); 257 } 258 259 spin_unlock_irqrestore(&priv->lock, flags); 260 261 if (path->ah) 262 ipoib_put_ah(path->ah); 263 264 kfree(path); 265 } 266 267 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 268 269 struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev) 270 { 271 struct ipoib_path_iter *iter; 272 273 iter = kmalloc(sizeof *iter, GFP_KERNEL); 274 if (!iter) 275 return NULL; 276 277 iter->dev = dev; 278 memset(iter->path.pathrec.dgid.raw, 0, 16); 279 280 if (ipoib_path_iter_next(iter)) { 281 kfree(iter); 282 return NULL; 283 } 284 285 return iter; 286 } 287 288 int ipoib_path_iter_next(struct ipoib_path_iter *iter) 289 { 290 struct ipoib_dev_priv *priv = netdev_priv(iter->dev); 291 struct rb_node *n; 292 struct ipoib_path *path; 293 int ret = 1; 294 295 spin_lock_irq(&priv->lock); 296 297 n = rb_first(&priv->path_tree); 298 299 while (n) { 300 path = rb_entry(n, struct ipoib_path, rb_node); 301 302 if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw, 303 sizeof (union ib_gid)) < 0) { 304 iter->path = *path; 305 ret = 0; 306 break; 307 } 308 309 n = rb_next(n); 310 } 311 312 spin_unlock_irq(&priv->lock); 313 314 return ret; 315 } 316 317 void ipoib_path_iter_read(struct ipoib_path_iter *iter, 318 struct ipoib_path *path) 319 { 320 *path = iter->path; 321 } 322 323 #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */ 324 325 void ipoib_flush_paths(struct net_device *dev) 326 { 327 struct ipoib_dev_priv *priv = netdev_priv(dev); 328 struct ipoib_path *path, *tp; 329 LIST_HEAD(remove_list); 330 unsigned long flags; 331 332 spin_lock_irqsave(&priv->lock, flags); 333 334 list_splice(&priv->path_list, &remove_list); 335 INIT_LIST_HEAD(&priv->path_list); 336 337 list_for_each_entry(path, &remove_list, list) 338 rb_erase(&path->rb_node, &priv->path_tree); 339 340 spin_unlock_irqrestore(&priv->lock, flags); 341 342 list_for_each_entry_safe(path, tp, &remove_list, list) { 343 if (path->query) 344 ib_sa_cancel_query(path->query_id, path->query); 345 wait_for_completion(&path->done); 346 path_free(dev, path); 347 } 348 } 349 350 static void path_rec_completion(int status, 351 struct ib_sa_path_rec *pathrec, 352 void *path_ptr) 353 { 354 struct ipoib_path *path = path_ptr; 355 struct net_device *dev = path->dev; 356 struct ipoib_dev_priv *priv = netdev_priv(dev); 357 struct ipoib_ah *ah = NULL; 358 struct ipoib_neigh *neigh; 359 struct sk_buff_head skqueue; 360 struct sk_buff *skb; 361 unsigned long flags; 362 363 if (pathrec) 364 ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n", 365 be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid)); 366 else 367 ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n", 368 status, IPOIB_GID_ARG(path->pathrec.dgid)); 369 370 skb_queue_head_init(&skqueue); 371 372 if (!status) { 373 struct ib_ah_attr av = { 374 .dlid = be16_to_cpu(pathrec->dlid), 375 .sl = pathrec->sl, 376 .port_num = priv->port 377 }; 378 int path_rate = ib_sa_rate_enum_to_int(pathrec->rate); 379 380 if (path_rate > 0 && priv->local_rate > path_rate) 381 av.static_rate = (priv->local_rate - 1) / path_rate; 382 383 ipoib_dbg(priv, "static_rate %d for local port %dX, path %dX\n", 384 av.static_rate, priv->local_rate, 385 ib_sa_rate_enum_to_int(pathrec->rate)); 386 387 ah = ipoib_create_ah(dev, priv->pd, &av); 388 } 389 390 spin_lock_irqsave(&priv->lock, flags); 391 392 path->ah = ah; 393 394 if (ah) { 395 path->pathrec = *pathrec; 396 397 ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n", 398 ah, be16_to_cpu(pathrec->dlid), pathrec->sl); 399 400 while ((skb = __skb_dequeue(&path->queue))) 401 __skb_queue_tail(&skqueue, skb); 402 403 list_for_each_entry(neigh, &path->neigh_list, list) { 404 kref_get(&path->ah->ref); 405 neigh->ah = path->ah; 406 407 while ((skb = __skb_dequeue(&neigh->queue))) 408 __skb_queue_tail(&skqueue, skb); 409 } 410 } 411 412 path->query = NULL; 413 complete(&path->done); 414 415 spin_unlock_irqrestore(&priv->lock, flags); 416 417 while ((skb = __skb_dequeue(&skqueue))) { 418 skb->dev = dev; 419 if (dev_queue_xmit(skb)) 420 ipoib_warn(priv, "dev_queue_xmit failed " 421 "to requeue packet\n"); 422 } 423 } 424 425 static struct ipoib_path *path_rec_create(struct net_device *dev, 426 union ib_gid *gid) 427 { 428 struct ipoib_dev_priv *priv = netdev_priv(dev); 429 struct ipoib_path *path; 430 431 path = kzalloc(sizeof *path, GFP_ATOMIC); 432 if (!path) 433 return NULL; 434 435 path->dev = dev; 436 437 skb_queue_head_init(&path->queue); 438 439 INIT_LIST_HEAD(&path->neigh_list); 440 441 memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid)); 442 path->pathrec.sgid = priv->local_gid; 443 path->pathrec.pkey = cpu_to_be16(priv->pkey); 444 path->pathrec.numb_path = 1; 445 446 return path; 447 } 448 449 static int path_rec_start(struct net_device *dev, 450 struct ipoib_path *path) 451 { 452 struct ipoib_dev_priv *priv = netdev_priv(dev); 453 454 ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n", 455 IPOIB_GID_ARG(path->pathrec.dgid)); 456 457 init_completion(&path->done); 458 459 path->query_id = 460 ib_sa_path_rec_get(priv->ca, priv->port, 461 &path->pathrec, 462 IB_SA_PATH_REC_DGID | 463 IB_SA_PATH_REC_SGID | 464 IB_SA_PATH_REC_NUMB_PATH | 465 IB_SA_PATH_REC_PKEY, 466 1000, GFP_ATOMIC, 467 path_rec_completion, 468 path, &path->query); 469 if (path->query_id < 0) { 470 ipoib_warn(priv, "ib_sa_path_rec_get failed\n"); 471 path->query = NULL; 472 return path->query_id; 473 } 474 475 return 0; 476 } 477 478 static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) 479 { 480 struct ipoib_dev_priv *priv = netdev_priv(dev); 481 struct ipoib_path *path; 482 struct ipoib_neigh *neigh; 483 484 neigh = kmalloc(sizeof *neigh, GFP_ATOMIC); 485 if (!neigh) { 486 ++priv->stats.tx_dropped; 487 dev_kfree_skb_any(skb); 488 return; 489 } 490 491 skb_queue_head_init(&neigh->queue); 492 neigh->neighbour = skb->dst->neighbour; 493 *to_ipoib_neigh(skb->dst->neighbour) = neigh; 494 495 /* 496 * We can only be called from ipoib_start_xmit, so we're 497 * inside tx_lock -- no need to save/restore flags. 498 */ 499 spin_lock(&priv->lock); 500 501 path = __path_find(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4)); 502 if (!path) { 503 path = path_rec_create(dev, 504 (union ib_gid *) (skb->dst->neighbour->ha + 4)); 505 if (!path) 506 goto err; 507 508 __path_add(dev, path); 509 } 510 511 list_add_tail(&neigh->list, &path->neigh_list); 512 513 if (path->ah) { 514 kref_get(&path->ah->ref); 515 neigh->ah = path->ah; 516 517 ipoib_send(dev, skb, path->ah, 518 be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); 519 } else { 520 neigh->ah = NULL; 521 __skb_queue_tail(&neigh->queue, skb); 522 523 if (!path->query && path_rec_start(dev, path)) 524 goto err; 525 } 526 527 spin_unlock(&priv->lock); 528 return; 529 530 err: 531 *to_ipoib_neigh(skb->dst->neighbour) = NULL; 532 list_del(&neigh->list); 533 kfree(neigh); 534 535 ++priv->stats.tx_dropped; 536 dev_kfree_skb_any(skb); 537 538 spin_unlock(&priv->lock); 539 } 540 541 static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev) 542 { 543 struct ipoib_dev_priv *priv = netdev_priv(skb->dev); 544 545 /* Look up path record for unicasts */ 546 if (skb->dst->neighbour->ha[4] != 0xff) { 547 neigh_add_path(skb, dev); 548 return; 549 } 550 551 /* Add in the P_Key for multicasts */ 552 skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff; 553 skb->dst->neighbour->ha[9] = priv->pkey & 0xff; 554 ipoib_mcast_send(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4), skb); 555 } 556 557 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, 558 struct ipoib_pseudoheader *phdr) 559 { 560 struct ipoib_dev_priv *priv = netdev_priv(dev); 561 struct ipoib_path *path; 562 563 /* 564 * We can only be called from ipoib_start_xmit, so we're 565 * inside tx_lock -- no need to save/restore flags. 566 */ 567 spin_lock(&priv->lock); 568 569 path = __path_find(dev, (union ib_gid *) (phdr->hwaddr + 4)); 570 if (!path) { 571 path = path_rec_create(dev, 572 (union ib_gid *) (phdr->hwaddr + 4)); 573 if (path) { 574 /* put pseudoheader back on for next time */ 575 skb_push(skb, sizeof *phdr); 576 __skb_queue_tail(&path->queue, skb); 577 578 if (path_rec_start(dev, path)) { 579 spin_unlock(&priv->lock); 580 path_free(dev, path); 581 return; 582 } else 583 __path_add(dev, path); 584 } else { 585 ++priv->stats.tx_dropped; 586 dev_kfree_skb_any(skb); 587 } 588 589 spin_unlock(&priv->lock); 590 return; 591 } 592 593 if (path->ah) { 594 ipoib_dbg(priv, "Send unicast ARP to %04x\n", 595 be16_to_cpu(path->pathrec.dlid)); 596 597 ipoib_send(dev, skb, path->ah, 598 be32_to_cpup((__be32 *) phdr->hwaddr)); 599 } else if ((path->query || !path_rec_start(dev, path)) && 600 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 601 /* put pseudoheader back on for next time */ 602 skb_push(skb, sizeof *phdr); 603 __skb_queue_tail(&path->queue, skb); 604 } else { 605 ++priv->stats.tx_dropped; 606 dev_kfree_skb_any(skb); 607 } 608 609 spin_unlock(&priv->lock); 610 } 611 612 static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) 613 { 614 struct ipoib_dev_priv *priv = netdev_priv(dev); 615 struct ipoib_neigh *neigh; 616 unsigned long flags; 617 618 if (!spin_trylock_irqsave(&priv->tx_lock, flags)) 619 return NETDEV_TX_LOCKED; 620 621 /* 622 * Check if our queue is stopped. Since we have the LLTX bit 623 * set, we can't rely on netif_stop_queue() preventing our 624 * xmit function from being called with a full queue. 625 */ 626 if (unlikely(netif_queue_stopped(dev))) { 627 spin_unlock_irqrestore(&priv->tx_lock, flags); 628 return NETDEV_TX_BUSY; 629 } 630 631 if (skb->dst && skb->dst->neighbour) { 632 if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) { 633 ipoib_path_lookup(skb, dev); 634 goto out; 635 } 636 637 neigh = *to_ipoib_neigh(skb->dst->neighbour); 638 639 if (likely(neigh->ah)) { 640 ipoib_send(dev, skb, neigh->ah, 641 be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); 642 goto out; 643 } 644 645 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 646 spin_lock(&priv->lock); 647 __skb_queue_tail(&neigh->queue, skb); 648 spin_unlock(&priv->lock); 649 } else { 650 ++priv->stats.tx_dropped; 651 dev_kfree_skb_any(skb); 652 } 653 } else { 654 struct ipoib_pseudoheader *phdr = 655 (struct ipoib_pseudoheader *) skb->data; 656 skb_pull(skb, sizeof *phdr); 657 658 if (phdr->hwaddr[4] == 0xff) { 659 /* Add in the P_Key for multicast*/ 660 phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff; 661 phdr->hwaddr[9] = priv->pkey & 0xff; 662 663 ipoib_mcast_send(dev, (union ib_gid *) (phdr->hwaddr + 4), skb); 664 } else { 665 /* unicast GID -- should be ARP or RARP reply */ 666 667 if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) && 668 (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) { 669 ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x " 670 IPOIB_GID_FMT "\n", 671 skb->dst ? "neigh" : "dst", 672 be16_to_cpup((__be16 *) skb->data), 673 be32_to_cpup((__be32 *) phdr->hwaddr), 674 IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4))); 675 dev_kfree_skb_any(skb); 676 ++priv->stats.tx_dropped; 677 goto out; 678 } 679 680 unicast_arp_send(skb, dev, phdr); 681 } 682 } 683 684 out: 685 spin_unlock_irqrestore(&priv->tx_lock, flags); 686 687 return NETDEV_TX_OK; 688 } 689 690 static struct net_device_stats *ipoib_get_stats(struct net_device *dev) 691 { 692 struct ipoib_dev_priv *priv = netdev_priv(dev); 693 694 return &priv->stats; 695 } 696 697 static void ipoib_timeout(struct net_device *dev) 698 { 699 struct ipoib_dev_priv *priv = netdev_priv(dev); 700 701 ipoib_warn(priv, "transmit timeout: latency %d msecs\n", 702 jiffies_to_msecs(jiffies - dev->trans_start)); 703 ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n", 704 netif_queue_stopped(dev), 705 priv->tx_head, priv->tx_tail); 706 /* XXX reset QP, etc. */ 707 } 708 709 static int ipoib_hard_header(struct sk_buff *skb, 710 struct net_device *dev, 711 unsigned short type, 712 void *daddr, void *saddr, unsigned len) 713 { 714 struct ipoib_header *header; 715 716 header = (struct ipoib_header *) skb_push(skb, sizeof *header); 717 718 header->proto = htons(type); 719 header->reserved = 0; 720 721 /* 722 * If we don't have a neighbour structure, stuff the 723 * destination address onto the front of the skb so we can 724 * figure out where to send the packet later. 725 */ 726 if ((!skb->dst || !skb->dst->neighbour) && daddr) { 727 struct ipoib_pseudoheader *phdr = 728 (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr); 729 memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN); 730 } 731 732 return 0; 733 } 734 735 static void ipoib_set_mcast_list(struct net_device *dev) 736 { 737 struct ipoib_dev_priv *priv = netdev_priv(dev); 738 739 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) { 740 ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set"); 741 return; 742 } 743 744 queue_work(ipoib_workqueue, &priv->restart_task); 745 } 746 747 static void ipoib_neigh_destructor(struct neighbour *n) 748 { 749 struct ipoib_neigh *neigh; 750 struct ipoib_dev_priv *priv = netdev_priv(n->dev); 751 unsigned long flags; 752 struct ipoib_ah *ah = NULL; 753 754 ipoib_dbg(priv, 755 "neigh_destructor for %06x " IPOIB_GID_FMT "\n", 756 be32_to_cpup((__be32 *) n->ha), 757 IPOIB_GID_ARG(*((union ib_gid *) (n->ha + 4)))); 758 759 spin_lock_irqsave(&priv->lock, flags); 760 761 neigh = *to_ipoib_neigh(n); 762 if (neigh) { 763 if (neigh->ah) 764 ah = neigh->ah; 765 list_del(&neigh->list); 766 *to_ipoib_neigh(n) = NULL; 767 kfree(neigh); 768 } 769 770 spin_unlock_irqrestore(&priv->lock, flags); 771 772 if (ah) 773 ipoib_put_ah(ah); 774 } 775 776 static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms) 777 { 778 parms->neigh_destructor = ipoib_neigh_destructor; 779 780 return 0; 781 } 782 783 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port) 784 { 785 struct ipoib_dev_priv *priv = netdev_priv(dev); 786 787 /* Allocate RX/TX "rings" to hold queued skbs */ 788 789 priv->rx_ring = kzalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf), 790 GFP_KERNEL); 791 if (!priv->rx_ring) { 792 printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n", 793 ca->name, IPOIB_RX_RING_SIZE); 794 goto out; 795 } 796 797 priv->tx_ring = kzalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf), 798 GFP_KERNEL); 799 if (!priv->tx_ring) { 800 printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n", 801 ca->name, IPOIB_TX_RING_SIZE); 802 goto out_rx_ring_cleanup; 803 } 804 805 /* priv->tx_head & tx_tail are already 0 */ 806 807 if (ipoib_ib_dev_init(dev, ca, port)) 808 goto out_tx_ring_cleanup; 809 810 return 0; 811 812 out_tx_ring_cleanup: 813 kfree(priv->tx_ring); 814 815 out_rx_ring_cleanup: 816 kfree(priv->rx_ring); 817 818 out: 819 return -ENOMEM; 820 } 821 822 void ipoib_dev_cleanup(struct net_device *dev) 823 { 824 struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv; 825 826 ipoib_delete_debug_files(dev); 827 828 /* Delete any child interfaces first */ 829 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) { 830 unregister_netdev(cpriv->dev); 831 ipoib_dev_cleanup(cpriv->dev); 832 free_netdev(cpriv->dev); 833 } 834 835 ipoib_ib_dev_cleanup(dev); 836 837 kfree(priv->rx_ring); 838 kfree(priv->tx_ring); 839 840 priv->rx_ring = NULL; 841 priv->tx_ring = NULL; 842 } 843 844 static void ipoib_setup(struct net_device *dev) 845 { 846 struct ipoib_dev_priv *priv = netdev_priv(dev); 847 848 dev->open = ipoib_open; 849 dev->stop = ipoib_stop; 850 dev->change_mtu = ipoib_change_mtu; 851 dev->hard_start_xmit = ipoib_start_xmit; 852 dev->get_stats = ipoib_get_stats; 853 dev->tx_timeout = ipoib_timeout; 854 dev->hard_header = ipoib_hard_header; 855 dev->set_multicast_list = ipoib_set_mcast_list; 856 dev->neigh_setup = ipoib_neigh_setup_dev; 857 858 dev->watchdog_timeo = HZ; 859 860 dev->flags |= IFF_BROADCAST | IFF_MULTICAST; 861 862 /* 863 * We add in INFINIBAND_ALEN to allow for the destination 864 * address "pseudoheader" for skbs without neighbour struct. 865 */ 866 dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN; 867 dev->addr_len = INFINIBAND_ALEN; 868 dev->type = ARPHRD_INFINIBAND; 869 dev->tx_queue_len = IPOIB_TX_RING_SIZE * 2; 870 dev->features = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX; 871 872 /* MTU will be reset when mcast join happens */ 873 dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN; 874 priv->mcast_mtu = priv->admin_mtu = dev->mtu; 875 876 memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); 877 878 netif_carrier_off(dev); 879 880 SET_MODULE_OWNER(dev); 881 882 priv->dev = dev; 883 884 spin_lock_init(&priv->lock); 885 spin_lock_init(&priv->tx_lock); 886 887 mutex_init(&priv->mcast_mutex); 888 mutex_init(&priv->vlan_mutex); 889 890 INIT_LIST_HEAD(&priv->path_list); 891 INIT_LIST_HEAD(&priv->child_intfs); 892 INIT_LIST_HEAD(&priv->dead_ahs); 893 INIT_LIST_HEAD(&priv->multicast_list); 894 895 INIT_WORK(&priv->pkey_task, ipoib_pkey_poll, priv->dev); 896 INIT_WORK(&priv->mcast_task, ipoib_mcast_join_task, priv->dev); 897 INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, priv->dev); 898 INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev); 899 INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah, priv->dev); 900 } 901 902 struct ipoib_dev_priv *ipoib_intf_alloc(const char *name) 903 { 904 struct net_device *dev; 905 906 dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name, 907 ipoib_setup); 908 if (!dev) 909 return NULL; 910 911 return netdev_priv(dev); 912 } 913 914 static ssize_t show_pkey(struct class_device *cdev, char *buf) 915 { 916 struct ipoib_dev_priv *priv = 917 netdev_priv(container_of(cdev, struct net_device, class_dev)); 918 919 return sprintf(buf, "0x%04x\n", priv->pkey); 920 } 921 static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); 922 923 static ssize_t create_child(struct class_device *cdev, 924 const char *buf, size_t count) 925 { 926 int pkey; 927 int ret; 928 929 if (sscanf(buf, "%i", &pkey) != 1) 930 return -EINVAL; 931 932 if (pkey < 0 || pkey > 0xffff) 933 return -EINVAL; 934 935 /* 936 * Set the full membership bit, so that we join the right 937 * broadcast group, etc. 938 */ 939 pkey |= 0x8000; 940 941 ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev), 942 pkey); 943 944 return ret ? ret : count; 945 } 946 static CLASS_DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child); 947 948 static ssize_t delete_child(struct class_device *cdev, 949 const char *buf, size_t count) 950 { 951 int pkey; 952 int ret; 953 954 if (sscanf(buf, "%i", &pkey) != 1) 955 return -EINVAL; 956 957 if (pkey < 0 || pkey > 0xffff) 958 return -EINVAL; 959 960 ret = ipoib_vlan_delete(container_of(cdev, struct net_device, class_dev), 961 pkey); 962 963 return ret ? ret : count; 964 965 } 966 static CLASS_DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child); 967 968 int ipoib_add_pkey_attr(struct net_device *dev) 969 { 970 return class_device_create_file(&dev->class_dev, 971 &class_device_attr_pkey); 972 } 973 974 static struct net_device *ipoib_add_port(const char *format, 975 struct ib_device *hca, u8 port) 976 { 977 struct ipoib_dev_priv *priv; 978 int result = -ENOMEM; 979 980 priv = ipoib_intf_alloc(format); 981 if (!priv) 982 goto alloc_mem_failed; 983 984 SET_NETDEV_DEV(priv->dev, hca->dma_device); 985 986 result = ib_query_pkey(hca, port, 0, &priv->pkey); 987 if (result) { 988 printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n", 989 hca->name, port, result); 990 goto alloc_mem_failed; 991 } 992 993 /* 994 * Set the full membership bit, so that we join the right 995 * broadcast group, etc. 996 */ 997 priv->pkey |= 0x8000; 998 999 priv->dev->broadcast[8] = priv->pkey >> 8; 1000 priv->dev->broadcast[9] = priv->pkey & 0xff; 1001 1002 result = ib_query_gid(hca, port, 0, &priv->local_gid); 1003 if (result) { 1004 printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n", 1005 hca->name, port, result); 1006 goto alloc_mem_failed; 1007 } else 1008 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); 1009 1010 1011 result = ipoib_dev_init(priv->dev, hca, port); 1012 if (result < 0) { 1013 printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n", 1014 hca->name, port, result); 1015 goto device_init_failed; 1016 } 1017 1018 INIT_IB_EVENT_HANDLER(&priv->event_handler, 1019 priv->ca, ipoib_event); 1020 result = ib_register_event_handler(&priv->event_handler); 1021 if (result < 0) { 1022 printk(KERN_WARNING "%s: ib_register_event_handler failed for " 1023 "port %d (ret = %d)\n", 1024 hca->name, port, result); 1025 goto event_failed; 1026 } 1027 1028 result = register_netdev(priv->dev); 1029 if (result) { 1030 printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n", 1031 hca->name, port, result); 1032 goto register_failed; 1033 } 1034 1035 ipoib_create_debug_files(priv->dev); 1036 1037 if (ipoib_add_pkey_attr(priv->dev)) 1038 goto sysfs_failed; 1039 if (class_device_create_file(&priv->dev->class_dev, 1040 &class_device_attr_create_child)) 1041 goto sysfs_failed; 1042 if (class_device_create_file(&priv->dev->class_dev, 1043 &class_device_attr_delete_child)) 1044 goto sysfs_failed; 1045 1046 return priv->dev; 1047 1048 sysfs_failed: 1049 ipoib_delete_debug_files(priv->dev); 1050 unregister_netdev(priv->dev); 1051 1052 register_failed: 1053 ib_unregister_event_handler(&priv->event_handler); 1054 flush_scheduled_work(); 1055 1056 event_failed: 1057 ipoib_dev_cleanup(priv->dev); 1058 1059 device_init_failed: 1060 free_netdev(priv->dev); 1061 1062 alloc_mem_failed: 1063 return ERR_PTR(result); 1064 } 1065 1066 static void ipoib_add_one(struct ib_device *device) 1067 { 1068 struct list_head *dev_list; 1069 struct net_device *dev; 1070 struct ipoib_dev_priv *priv; 1071 int s, e, p; 1072 1073 dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); 1074 if (!dev_list) 1075 return; 1076 1077 INIT_LIST_HEAD(dev_list); 1078 1079 if (device->node_type == IB_NODE_SWITCH) { 1080 s = 0; 1081 e = 0; 1082 } else { 1083 s = 1; 1084 e = device->phys_port_cnt; 1085 } 1086 1087 for (p = s; p <= e; ++p) { 1088 dev = ipoib_add_port("ib%d", device, p); 1089 if (!IS_ERR(dev)) { 1090 priv = netdev_priv(dev); 1091 list_add_tail(&priv->list, dev_list); 1092 } 1093 } 1094 1095 ib_set_client_data(device, &ipoib_client, dev_list); 1096 } 1097 1098 static void ipoib_remove_one(struct ib_device *device) 1099 { 1100 struct ipoib_dev_priv *priv, *tmp; 1101 struct list_head *dev_list; 1102 1103 dev_list = ib_get_client_data(device, &ipoib_client); 1104 1105 list_for_each_entry_safe(priv, tmp, dev_list, list) { 1106 ib_unregister_event_handler(&priv->event_handler); 1107 flush_scheduled_work(); 1108 1109 unregister_netdev(priv->dev); 1110 ipoib_dev_cleanup(priv->dev); 1111 free_netdev(priv->dev); 1112 } 1113 1114 kfree(dev_list); 1115 } 1116 1117 static int __init ipoib_init_module(void) 1118 { 1119 int ret; 1120 1121 ret = ipoib_register_debugfs(); 1122 if (ret) 1123 return ret; 1124 1125 /* 1126 * We create our own workqueue mainly because we want to be 1127 * able to flush it when devices are being removed. We can't 1128 * use schedule_work()/flush_scheduled_work() because both 1129 * unregister_netdev() and linkwatch_event take the rtnl lock, 1130 * so flush_scheduled_work() can deadlock during device 1131 * removal. 1132 */ 1133 ipoib_workqueue = create_singlethread_workqueue("ipoib"); 1134 if (!ipoib_workqueue) { 1135 ret = -ENOMEM; 1136 goto err_fs; 1137 } 1138 1139 ret = ib_register_client(&ipoib_client); 1140 if (ret) 1141 goto err_wq; 1142 1143 return 0; 1144 1145 err_wq: 1146 destroy_workqueue(ipoib_workqueue); 1147 1148 err_fs: 1149 ipoib_unregister_debugfs(); 1150 1151 return ret; 1152 } 1153 1154 static void __exit ipoib_cleanup_module(void) 1155 { 1156 ib_unregister_client(&ipoib_client); 1157 ipoib_unregister_debugfs(); 1158 destroy_workqueue(ipoib_workqueue); 1159 } 1160 1161 module_init(ipoib_init_module); 1162 module_exit(ipoib_cleanup_module); 1163