1 /* 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 * 34 * $Id: ipoib_main.c 1377 2004-12-23 19:57:12Z roland $ 35 */ 36 37 #include "ipoib.h" 38 39 #include <linux/module.h> 40 41 #include <linux/init.h> 42 #include <linux/slab.h> 43 #include <linux/vmalloc.h> 44 #include <linux/kernel.h> 45 46 #include <linux/if_arp.h> /* For ARPHRD_xxx */ 47 48 #include <linux/ip.h> 49 #include <linux/in.h> 50 51 #include <net/dst.h> 52 53 MODULE_AUTHOR("Roland Dreier"); 54 MODULE_DESCRIPTION("IP-over-InfiniBand net driver"); 55 MODULE_LICENSE("Dual BSD/GPL"); 56 57 int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE; 58 int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE; 59 60 module_param_named(send_queue_size, ipoib_sendq_size, int, 0444); 61 MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue"); 62 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444); 63 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue"); 64 65 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 66 int ipoib_debug_level; 67 68 module_param_named(debug_level, ipoib_debug_level, int, 0644); 69 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 70 #endif 71 72 struct ipoib_path_iter { 73 struct net_device *dev; 74 struct ipoib_path path; 75 }; 76 77 static const u8 ipv4_bcast_addr[] = { 78 0x00, 0xff, 0xff, 0xff, 79 0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00, 80 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff 81 }; 82 83 struct workqueue_struct *ipoib_workqueue; 84 85 static void ipoib_add_one(struct ib_device *device); 86 static void ipoib_remove_one(struct ib_device *device); 87 88 static struct ib_client ipoib_client = { 89 .name = "ipoib", 90 .add = ipoib_add_one, 91 .remove = ipoib_remove_one 92 }; 93 94 int ipoib_open(struct net_device *dev) 95 { 96 struct ipoib_dev_priv *priv = netdev_priv(dev); 97 98 ipoib_dbg(priv, "bringing up interface\n"); 99 100 set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 101 102 if (ipoib_pkey_dev_delay_open(dev)) 103 return 0; 104 105 if (ipoib_ib_dev_open(dev)) 106 return -EINVAL; 107 108 if (ipoib_ib_dev_up(dev)) { 109 ipoib_ib_dev_stop(dev); 110 return -EINVAL; 111 } 112 113 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 114 struct ipoib_dev_priv *cpriv; 115 116 /* Bring up any child interfaces too */ 117 mutex_lock(&priv->vlan_mutex); 118 list_for_each_entry(cpriv, &priv->child_intfs, list) { 119 int flags; 120 121 flags = cpriv->dev->flags; 122 if (flags & IFF_UP) 123 continue; 124 125 dev_change_flags(cpriv->dev, flags | IFF_UP); 126 } 127 mutex_unlock(&priv->vlan_mutex); 128 } 129 130 netif_start_queue(dev); 131 132 return 0; 133 } 134 135 static int ipoib_stop(struct net_device *dev) 136 { 137 struct ipoib_dev_priv *priv = netdev_priv(dev); 138 139 ipoib_dbg(priv, "stopping interface\n"); 140 141 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 142 143 netif_stop_queue(dev); 144 145 /* 146 * Now flush workqueue to make sure a scheduled task doesn't 147 * bring our internal state back up. 148 */ 149 flush_workqueue(ipoib_workqueue); 150 151 ipoib_ib_dev_down(dev, 1); 152 ipoib_ib_dev_stop(dev); 153 154 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 155 struct ipoib_dev_priv *cpriv; 156 157 /* Bring down any child interfaces too */ 158 mutex_lock(&priv->vlan_mutex); 159 list_for_each_entry(cpriv, &priv->child_intfs, list) { 160 int flags; 161 162 flags = cpriv->dev->flags; 163 if (!(flags & IFF_UP)) 164 continue; 165 166 dev_change_flags(cpriv->dev, flags & ~IFF_UP); 167 } 168 mutex_unlock(&priv->vlan_mutex); 169 } 170 171 return 0; 172 } 173 174 static int ipoib_change_mtu(struct net_device *dev, int new_mtu) 175 { 176 struct ipoib_dev_priv *priv = netdev_priv(dev); 177 178 if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN) 179 return -EINVAL; 180 181 priv->admin_mtu = new_mtu; 182 183 dev->mtu = min(priv->mcast_mtu, priv->admin_mtu); 184 185 return 0; 186 } 187 188 static struct ipoib_path *__path_find(struct net_device *dev, 189 union ib_gid *gid) 190 { 191 struct ipoib_dev_priv *priv = netdev_priv(dev); 192 struct rb_node *n = priv->path_tree.rb_node; 193 struct ipoib_path *path; 194 int ret; 195 196 while (n) { 197 path = rb_entry(n, struct ipoib_path, rb_node); 198 199 ret = memcmp(gid->raw, path->pathrec.dgid.raw, 200 sizeof (union ib_gid)); 201 202 if (ret < 0) 203 n = n->rb_left; 204 else if (ret > 0) 205 n = n->rb_right; 206 else 207 return path; 208 } 209 210 return NULL; 211 } 212 213 static int __path_add(struct net_device *dev, struct ipoib_path *path) 214 { 215 struct ipoib_dev_priv *priv = netdev_priv(dev); 216 struct rb_node **n = &priv->path_tree.rb_node; 217 struct rb_node *pn = NULL; 218 struct ipoib_path *tpath; 219 int ret; 220 221 while (*n) { 222 pn = *n; 223 tpath = rb_entry(pn, struct ipoib_path, rb_node); 224 225 ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw, 226 sizeof (union ib_gid)); 227 if (ret < 0) 228 n = &pn->rb_left; 229 else if (ret > 0) 230 n = &pn->rb_right; 231 else 232 return -EEXIST; 233 } 234 235 rb_link_node(&path->rb_node, pn, n); 236 rb_insert_color(&path->rb_node, &priv->path_tree); 237 238 list_add_tail(&path->list, &priv->path_list); 239 240 return 0; 241 } 242 243 static void path_free(struct net_device *dev, struct ipoib_path *path) 244 { 245 struct ipoib_dev_priv *priv = netdev_priv(dev); 246 struct ipoib_neigh *neigh, *tn; 247 struct sk_buff *skb; 248 unsigned long flags; 249 250 while ((skb = __skb_dequeue(&path->queue))) 251 dev_kfree_skb_irq(skb); 252 253 spin_lock_irqsave(&priv->lock, flags); 254 255 list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) { 256 /* 257 * It's safe to call ipoib_put_ah() inside priv->lock 258 * here, because we know that path->ah will always 259 * hold one more reference, so ipoib_put_ah() will 260 * never do more than decrement the ref count. 261 */ 262 if (neigh->ah) 263 ipoib_put_ah(neigh->ah); 264 265 ipoib_neigh_free(neigh); 266 } 267 268 spin_unlock_irqrestore(&priv->lock, flags); 269 270 if (path->ah) 271 ipoib_put_ah(path->ah); 272 273 kfree(path); 274 } 275 276 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 277 278 struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev) 279 { 280 struct ipoib_path_iter *iter; 281 282 iter = kmalloc(sizeof *iter, GFP_KERNEL); 283 if (!iter) 284 return NULL; 285 286 iter->dev = dev; 287 memset(iter->path.pathrec.dgid.raw, 0, 16); 288 289 if (ipoib_path_iter_next(iter)) { 290 kfree(iter); 291 return NULL; 292 } 293 294 return iter; 295 } 296 297 int ipoib_path_iter_next(struct ipoib_path_iter *iter) 298 { 299 struct ipoib_dev_priv *priv = netdev_priv(iter->dev); 300 struct rb_node *n; 301 struct ipoib_path *path; 302 int ret = 1; 303 304 spin_lock_irq(&priv->lock); 305 306 n = rb_first(&priv->path_tree); 307 308 while (n) { 309 path = rb_entry(n, struct ipoib_path, rb_node); 310 311 if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw, 312 sizeof (union ib_gid)) < 0) { 313 iter->path = *path; 314 ret = 0; 315 break; 316 } 317 318 n = rb_next(n); 319 } 320 321 spin_unlock_irq(&priv->lock); 322 323 return ret; 324 } 325 326 void ipoib_path_iter_read(struct ipoib_path_iter *iter, 327 struct ipoib_path *path) 328 { 329 *path = iter->path; 330 } 331 332 #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */ 333 334 void ipoib_flush_paths(struct net_device *dev) 335 { 336 struct ipoib_dev_priv *priv = netdev_priv(dev); 337 struct ipoib_path *path, *tp; 338 LIST_HEAD(remove_list); 339 340 spin_lock_irq(&priv->lock); 341 342 list_splice(&priv->path_list, &remove_list); 343 INIT_LIST_HEAD(&priv->path_list); 344 345 list_for_each_entry(path, &remove_list, list) 346 rb_erase(&path->rb_node, &priv->path_tree); 347 348 list_for_each_entry_safe(path, tp, &remove_list, list) { 349 if (path->query) 350 ib_sa_cancel_query(path->query_id, path->query); 351 spin_unlock_irq(&priv->lock); 352 wait_for_completion(&path->done); 353 path_free(dev, path); 354 spin_lock_irq(&priv->lock); 355 } 356 spin_unlock_irq(&priv->lock); 357 } 358 359 static void path_rec_completion(int status, 360 struct ib_sa_path_rec *pathrec, 361 void *path_ptr) 362 { 363 struct ipoib_path *path = path_ptr; 364 struct net_device *dev = path->dev; 365 struct ipoib_dev_priv *priv = netdev_priv(dev); 366 struct ipoib_ah *ah = NULL; 367 struct ipoib_neigh *neigh; 368 struct sk_buff_head skqueue; 369 struct sk_buff *skb; 370 unsigned long flags; 371 372 if (pathrec) 373 ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n", 374 be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid)); 375 else 376 ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n", 377 status, IPOIB_GID_ARG(path->pathrec.dgid)); 378 379 skb_queue_head_init(&skqueue); 380 381 if (!status) { 382 struct ib_ah_attr av = { 383 .dlid = be16_to_cpu(pathrec->dlid), 384 .sl = pathrec->sl, 385 .port_num = priv->port, 386 .static_rate = pathrec->rate 387 }; 388 389 ah = ipoib_create_ah(dev, priv->pd, &av); 390 } 391 392 spin_lock_irqsave(&priv->lock, flags); 393 394 path->ah = ah; 395 396 if (ah) { 397 path->pathrec = *pathrec; 398 399 ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n", 400 ah, be16_to_cpu(pathrec->dlid), pathrec->sl); 401 402 while ((skb = __skb_dequeue(&path->queue))) 403 __skb_queue_tail(&skqueue, skb); 404 405 list_for_each_entry(neigh, &path->neigh_list, list) { 406 kref_get(&path->ah->ref); 407 neigh->ah = path->ah; 408 409 while ((skb = __skb_dequeue(&neigh->queue))) 410 __skb_queue_tail(&skqueue, skb); 411 } 412 } 413 414 path->query = NULL; 415 complete(&path->done); 416 417 spin_unlock_irqrestore(&priv->lock, flags); 418 419 while ((skb = __skb_dequeue(&skqueue))) { 420 skb->dev = dev; 421 if (dev_queue_xmit(skb)) 422 ipoib_warn(priv, "dev_queue_xmit failed " 423 "to requeue packet\n"); 424 } 425 } 426 427 static struct ipoib_path *path_rec_create(struct net_device *dev, 428 union ib_gid *gid) 429 { 430 struct ipoib_dev_priv *priv = netdev_priv(dev); 431 struct ipoib_path *path; 432 433 path = kzalloc(sizeof *path, GFP_ATOMIC); 434 if (!path) 435 return NULL; 436 437 path->dev = dev; 438 439 skb_queue_head_init(&path->queue); 440 441 INIT_LIST_HEAD(&path->neigh_list); 442 443 memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid)); 444 path->pathrec.sgid = priv->local_gid; 445 path->pathrec.pkey = cpu_to_be16(priv->pkey); 446 path->pathrec.numb_path = 1; 447 448 return path; 449 } 450 451 static int path_rec_start(struct net_device *dev, 452 struct ipoib_path *path) 453 { 454 struct ipoib_dev_priv *priv = netdev_priv(dev); 455 456 ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n", 457 IPOIB_GID_ARG(path->pathrec.dgid)); 458 459 init_completion(&path->done); 460 461 path->query_id = 462 ib_sa_path_rec_get(priv->ca, priv->port, 463 &path->pathrec, 464 IB_SA_PATH_REC_DGID | 465 IB_SA_PATH_REC_SGID | 466 IB_SA_PATH_REC_NUMB_PATH | 467 IB_SA_PATH_REC_PKEY, 468 1000, GFP_ATOMIC, 469 path_rec_completion, 470 path, &path->query); 471 if (path->query_id < 0) { 472 ipoib_warn(priv, "ib_sa_path_rec_get failed\n"); 473 path->query = NULL; 474 return path->query_id; 475 } 476 477 return 0; 478 } 479 480 static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) 481 { 482 struct ipoib_dev_priv *priv = netdev_priv(dev); 483 struct ipoib_path *path; 484 struct ipoib_neigh *neigh; 485 486 neigh = ipoib_neigh_alloc(skb->dst->neighbour); 487 if (!neigh) { 488 ++priv->stats.tx_dropped; 489 dev_kfree_skb_any(skb); 490 return; 491 } 492 493 skb_queue_head_init(&neigh->queue); 494 495 /* 496 * We can only be called from ipoib_start_xmit, so we're 497 * inside tx_lock -- no need to save/restore flags. 498 */ 499 spin_lock(&priv->lock); 500 501 path = __path_find(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4)); 502 if (!path) { 503 path = path_rec_create(dev, 504 (union ib_gid *) (skb->dst->neighbour->ha + 4)); 505 if (!path) 506 goto err_path; 507 508 __path_add(dev, path); 509 } 510 511 list_add_tail(&neigh->list, &path->neigh_list); 512 513 if (path->ah) { 514 kref_get(&path->ah->ref); 515 neigh->ah = path->ah; 516 517 ipoib_send(dev, skb, path->ah, 518 be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); 519 } else { 520 neigh->ah = NULL; 521 __skb_queue_tail(&neigh->queue, skb); 522 523 if (!path->query && path_rec_start(dev, path)) 524 goto err_list; 525 } 526 527 spin_unlock(&priv->lock); 528 return; 529 530 err_list: 531 list_del(&neigh->list); 532 533 err_path: 534 ipoib_neigh_free(neigh); 535 ++priv->stats.tx_dropped; 536 dev_kfree_skb_any(skb); 537 538 spin_unlock(&priv->lock); 539 } 540 541 static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev) 542 { 543 struct ipoib_dev_priv *priv = netdev_priv(skb->dev); 544 545 /* Look up path record for unicasts */ 546 if (skb->dst->neighbour->ha[4] != 0xff) { 547 neigh_add_path(skb, dev); 548 return; 549 } 550 551 /* Add in the P_Key for multicasts */ 552 skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff; 553 skb->dst->neighbour->ha[9] = priv->pkey & 0xff; 554 ipoib_mcast_send(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4), skb); 555 } 556 557 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, 558 struct ipoib_pseudoheader *phdr) 559 { 560 struct ipoib_dev_priv *priv = netdev_priv(dev); 561 struct ipoib_path *path; 562 563 /* 564 * We can only be called from ipoib_start_xmit, so we're 565 * inside tx_lock -- no need to save/restore flags. 566 */ 567 spin_lock(&priv->lock); 568 569 path = __path_find(dev, (union ib_gid *) (phdr->hwaddr + 4)); 570 if (!path) { 571 path = path_rec_create(dev, 572 (union ib_gid *) (phdr->hwaddr + 4)); 573 if (path) { 574 /* put pseudoheader back on for next time */ 575 skb_push(skb, sizeof *phdr); 576 __skb_queue_tail(&path->queue, skb); 577 578 if (path_rec_start(dev, path)) { 579 spin_unlock(&priv->lock); 580 path_free(dev, path); 581 return; 582 } else 583 __path_add(dev, path); 584 } else { 585 ++priv->stats.tx_dropped; 586 dev_kfree_skb_any(skb); 587 } 588 589 spin_unlock(&priv->lock); 590 return; 591 } 592 593 if (path->ah) { 594 ipoib_dbg(priv, "Send unicast ARP to %04x\n", 595 be16_to_cpu(path->pathrec.dlid)); 596 597 ipoib_send(dev, skb, path->ah, 598 be32_to_cpup((__be32 *) phdr->hwaddr)); 599 } else if ((path->query || !path_rec_start(dev, path)) && 600 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 601 /* put pseudoheader back on for next time */ 602 skb_push(skb, sizeof *phdr); 603 __skb_queue_tail(&path->queue, skb); 604 } else { 605 ++priv->stats.tx_dropped; 606 dev_kfree_skb_any(skb); 607 } 608 609 spin_unlock(&priv->lock); 610 } 611 612 static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) 613 { 614 struct ipoib_dev_priv *priv = netdev_priv(dev); 615 struct ipoib_neigh *neigh; 616 unsigned long flags; 617 618 if (!spin_trylock_irqsave(&priv->tx_lock, flags)) 619 return NETDEV_TX_LOCKED; 620 621 /* 622 * Check if our queue is stopped. Since we have the LLTX bit 623 * set, we can't rely on netif_stop_queue() preventing our 624 * xmit function from being called with a full queue. 625 */ 626 if (unlikely(netif_queue_stopped(dev))) { 627 spin_unlock_irqrestore(&priv->tx_lock, flags); 628 return NETDEV_TX_BUSY; 629 } 630 631 if (skb->dst && skb->dst->neighbour) { 632 if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) { 633 ipoib_path_lookup(skb, dev); 634 goto out; 635 } 636 637 neigh = *to_ipoib_neigh(skb->dst->neighbour); 638 639 if (likely(neigh->ah)) { 640 ipoib_send(dev, skb, neigh->ah, 641 be32_to_cpup((__be32 *) skb->dst->neighbour->ha)); 642 goto out; 643 } 644 645 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 646 spin_lock(&priv->lock); 647 __skb_queue_tail(&neigh->queue, skb); 648 spin_unlock(&priv->lock); 649 } else { 650 ++priv->stats.tx_dropped; 651 dev_kfree_skb_any(skb); 652 } 653 } else { 654 struct ipoib_pseudoheader *phdr = 655 (struct ipoib_pseudoheader *) skb->data; 656 skb_pull(skb, sizeof *phdr); 657 658 if (phdr->hwaddr[4] == 0xff) { 659 /* Add in the P_Key for multicast*/ 660 phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff; 661 phdr->hwaddr[9] = priv->pkey & 0xff; 662 663 ipoib_mcast_send(dev, (union ib_gid *) (phdr->hwaddr + 4), skb); 664 } else { 665 /* unicast GID -- should be ARP or RARP reply */ 666 667 if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) && 668 (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) { 669 ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x " 670 IPOIB_GID_FMT "\n", 671 skb->dst ? "neigh" : "dst", 672 be16_to_cpup((__be16 *) skb->data), 673 be32_to_cpup((__be32 *) phdr->hwaddr), 674 IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4))); 675 dev_kfree_skb_any(skb); 676 ++priv->stats.tx_dropped; 677 goto out; 678 } 679 680 unicast_arp_send(skb, dev, phdr); 681 } 682 } 683 684 out: 685 spin_unlock_irqrestore(&priv->tx_lock, flags); 686 687 return NETDEV_TX_OK; 688 } 689 690 static struct net_device_stats *ipoib_get_stats(struct net_device *dev) 691 { 692 struct ipoib_dev_priv *priv = netdev_priv(dev); 693 694 return &priv->stats; 695 } 696 697 static void ipoib_timeout(struct net_device *dev) 698 { 699 struct ipoib_dev_priv *priv = netdev_priv(dev); 700 701 ipoib_warn(priv, "transmit timeout: latency %d msecs\n", 702 jiffies_to_msecs(jiffies - dev->trans_start)); 703 ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n", 704 netif_queue_stopped(dev), 705 priv->tx_head, priv->tx_tail); 706 /* XXX reset QP, etc. */ 707 } 708 709 static int ipoib_hard_header(struct sk_buff *skb, 710 struct net_device *dev, 711 unsigned short type, 712 void *daddr, void *saddr, unsigned len) 713 { 714 struct ipoib_header *header; 715 716 header = (struct ipoib_header *) skb_push(skb, sizeof *header); 717 718 header->proto = htons(type); 719 header->reserved = 0; 720 721 /* 722 * If we don't have a neighbour structure, stuff the 723 * destination address onto the front of the skb so we can 724 * figure out where to send the packet later. 725 */ 726 if ((!skb->dst || !skb->dst->neighbour) && daddr) { 727 struct ipoib_pseudoheader *phdr = 728 (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr); 729 memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN); 730 } 731 732 return 0; 733 } 734 735 static void ipoib_set_mcast_list(struct net_device *dev) 736 { 737 struct ipoib_dev_priv *priv = netdev_priv(dev); 738 739 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) { 740 ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set"); 741 return; 742 } 743 744 queue_work(ipoib_workqueue, &priv->restart_task); 745 } 746 747 static void ipoib_neigh_destructor(struct neighbour *n) 748 { 749 struct ipoib_neigh *neigh; 750 struct ipoib_dev_priv *priv = netdev_priv(n->dev); 751 unsigned long flags; 752 struct ipoib_ah *ah = NULL; 753 754 ipoib_dbg(priv, 755 "neigh_destructor for %06x " IPOIB_GID_FMT "\n", 756 be32_to_cpup((__be32 *) n->ha), 757 IPOIB_GID_ARG(*((union ib_gid *) (n->ha + 4)))); 758 759 spin_lock_irqsave(&priv->lock, flags); 760 761 neigh = *to_ipoib_neigh(n); 762 if (neigh) { 763 if (neigh->ah) 764 ah = neigh->ah; 765 list_del(&neigh->list); 766 ipoib_neigh_free(neigh); 767 } 768 769 spin_unlock_irqrestore(&priv->lock, flags); 770 771 if (ah) 772 ipoib_put_ah(ah); 773 } 774 775 struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour) 776 { 777 struct ipoib_neigh *neigh; 778 779 neigh = kmalloc(sizeof *neigh, GFP_ATOMIC); 780 if (!neigh) 781 return NULL; 782 783 neigh->neighbour = neighbour; 784 *to_ipoib_neigh(neighbour) = neigh; 785 786 return neigh; 787 } 788 789 void ipoib_neigh_free(struct ipoib_neigh *neigh) 790 { 791 *to_ipoib_neigh(neigh->neighbour) = NULL; 792 kfree(neigh); 793 } 794 795 static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms) 796 { 797 parms->neigh_destructor = ipoib_neigh_destructor; 798 799 return 0; 800 } 801 802 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port) 803 { 804 struct ipoib_dev_priv *priv = netdev_priv(dev); 805 806 /* Allocate RX/TX "rings" to hold queued skbs */ 807 priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring, 808 GFP_KERNEL); 809 if (!priv->rx_ring) { 810 printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n", 811 ca->name, ipoib_recvq_size); 812 goto out; 813 } 814 815 priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring, 816 GFP_KERNEL); 817 if (!priv->tx_ring) { 818 printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n", 819 ca->name, ipoib_sendq_size); 820 goto out_rx_ring_cleanup; 821 } 822 823 /* priv->tx_head & tx_tail are already 0 */ 824 825 if (ipoib_ib_dev_init(dev, ca, port)) 826 goto out_tx_ring_cleanup; 827 828 return 0; 829 830 out_tx_ring_cleanup: 831 kfree(priv->tx_ring); 832 833 out_rx_ring_cleanup: 834 kfree(priv->rx_ring); 835 836 out: 837 return -ENOMEM; 838 } 839 840 void ipoib_dev_cleanup(struct net_device *dev) 841 { 842 struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv; 843 844 ipoib_delete_debug_files(dev); 845 846 /* Delete any child interfaces first */ 847 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) { 848 unregister_netdev(cpriv->dev); 849 ipoib_dev_cleanup(cpriv->dev); 850 free_netdev(cpriv->dev); 851 } 852 853 ipoib_ib_dev_cleanup(dev); 854 855 kfree(priv->rx_ring); 856 kfree(priv->tx_ring); 857 858 priv->rx_ring = NULL; 859 priv->tx_ring = NULL; 860 } 861 862 static void ipoib_setup(struct net_device *dev) 863 { 864 struct ipoib_dev_priv *priv = netdev_priv(dev); 865 866 dev->open = ipoib_open; 867 dev->stop = ipoib_stop; 868 dev->change_mtu = ipoib_change_mtu; 869 dev->hard_start_xmit = ipoib_start_xmit; 870 dev->get_stats = ipoib_get_stats; 871 dev->tx_timeout = ipoib_timeout; 872 dev->hard_header = ipoib_hard_header; 873 dev->set_multicast_list = ipoib_set_mcast_list; 874 dev->neigh_setup = ipoib_neigh_setup_dev; 875 876 dev->watchdog_timeo = HZ; 877 878 dev->flags |= IFF_BROADCAST | IFF_MULTICAST; 879 880 /* 881 * We add in INFINIBAND_ALEN to allow for the destination 882 * address "pseudoheader" for skbs without neighbour struct. 883 */ 884 dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN; 885 dev->addr_len = INFINIBAND_ALEN; 886 dev->type = ARPHRD_INFINIBAND; 887 dev->tx_queue_len = ipoib_sendq_size * 2; 888 dev->features = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX; 889 890 /* MTU will be reset when mcast join happens */ 891 dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN; 892 priv->mcast_mtu = priv->admin_mtu = dev->mtu; 893 894 memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); 895 896 netif_carrier_off(dev); 897 898 SET_MODULE_OWNER(dev); 899 900 priv->dev = dev; 901 902 spin_lock_init(&priv->lock); 903 spin_lock_init(&priv->tx_lock); 904 905 mutex_init(&priv->mcast_mutex); 906 mutex_init(&priv->vlan_mutex); 907 908 INIT_LIST_HEAD(&priv->path_list); 909 INIT_LIST_HEAD(&priv->child_intfs); 910 INIT_LIST_HEAD(&priv->dead_ahs); 911 INIT_LIST_HEAD(&priv->multicast_list); 912 913 INIT_WORK(&priv->pkey_task, ipoib_pkey_poll, priv->dev); 914 INIT_WORK(&priv->mcast_task, ipoib_mcast_join_task, priv->dev); 915 INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, priv->dev); 916 INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev); 917 INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah, priv->dev); 918 } 919 920 struct ipoib_dev_priv *ipoib_intf_alloc(const char *name) 921 { 922 struct net_device *dev; 923 924 dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name, 925 ipoib_setup); 926 if (!dev) 927 return NULL; 928 929 return netdev_priv(dev); 930 } 931 932 static ssize_t show_pkey(struct class_device *cdev, char *buf) 933 { 934 struct ipoib_dev_priv *priv = 935 netdev_priv(container_of(cdev, struct net_device, class_dev)); 936 937 return sprintf(buf, "0x%04x\n", priv->pkey); 938 } 939 static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); 940 941 static ssize_t create_child(struct class_device *cdev, 942 const char *buf, size_t count) 943 { 944 int pkey; 945 int ret; 946 947 if (sscanf(buf, "%i", &pkey) != 1) 948 return -EINVAL; 949 950 if (pkey < 0 || pkey > 0xffff) 951 return -EINVAL; 952 953 /* 954 * Set the full membership bit, so that we join the right 955 * broadcast group, etc. 956 */ 957 pkey |= 0x8000; 958 959 ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev), 960 pkey); 961 962 return ret ? ret : count; 963 } 964 static CLASS_DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child); 965 966 static ssize_t delete_child(struct class_device *cdev, 967 const char *buf, size_t count) 968 { 969 int pkey; 970 int ret; 971 972 if (sscanf(buf, "%i", &pkey) != 1) 973 return -EINVAL; 974 975 if (pkey < 0 || pkey > 0xffff) 976 return -EINVAL; 977 978 ret = ipoib_vlan_delete(container_of(cdev, struct net_device, class_dev), 979 pkey); 980 981 return ret ? ret : count; 982 983 } 984 static CLASS_DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child); 985 986 int ipoib_add_pkey_attr(struct net_device *dev) 987 { 988 return class_device_create_file(&dev->class_dev, 989 &class_device_attr_pkey); 990 } 991 992 static struct net_device *ipoib_add_port(const char *format, 993 struct ib_device *hca, u8 port) 994 { 995 struct ipoib_dev_priv *priv; 996 int result = -ENOMEM; 997 998 priv = ipoib_intf_alloc(format); 999 if (!priv) 1000 goto alloc_mem_failed; 1001 1002 SET_NETDEV_DEV(priv->dev, hca->dma_device); 1003 1004 result = ib_query_pkey(hca, port, 0, &priv->pkey); 1005 if (result) { 1006 printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n", 1007 hca->name, port, result); 1008 goto alloc_mem_failed; 1009 } 1010 1011 /* 1012 * Set the full membership bit, so that we join the right 1013 * broadcast group, etc. 1014 */ 1015 priv->pkey |= 0x8000; 1016 1017 priv->dev->broadcast[8] = priv->pkey >> 8; 1018 priv->dev->broadcast[9] = priv->pkey & 0xff; 1019 1020 result = ib_query_gid(hca, port, 0, &priv->local_gid); 1021 if (result) { 1022 printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n", 1023 hca->name, port, result); 1024 goto alloc_mem_failed; 1025 } else 1026 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); 1027 1028 1029 result = ipoib_dev_init(priv->dev, hca, port); 1030 if (result < 0) { 1031 printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n", 1032 hca->name, port, result); 1033 goto device_init_failed; 1034 } 1035 1036 INIT_IB_EVENT_HANDLER(&priv->event_handler, 1037 priv->ca, ipoib_event); 1038 result = ib_register_event_handler(&priv->event_handler); 1039 if (result < 0) { 1040 printk(KERN_WARNING "%s: ib_register_event_handler failed for " 1041 "port %d (ret = %d)\n", 1042 hca->name, port, result); 1043 goto event_failed; 1044 } 1045 1046 result = register_netdev(priv->dev); 1047 if (result) { 1048 printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n", 1049 hca->name, port, result); 1050 goto register_failed; 1051 } 1052 1053 ipoib_create_debug_files(priv->dev); 1054 1055 if (ipoib_add_pkey_attr(priv->dev)) 1056 goto sysfs_failed; 1057 if (class_device_create_file(&priv->dev->class_dev, 1058 &class_device_attr_create_child)) 1059 goto sysfs_failed; 1060 if (class_device_create_file(&priv->dev->class_dev, 1061 &class_device_attr_delete_child)) 1062 goto sysfs_failed; 1063 1064 return priv->dev; 1065 1066 sysfs_failed: 1067 ipoib_delete_debug_files(priv->dev); 1068 unregister_netdev(priv->dev); 1069 1070 register_failed: 1071 ib_unregister_event_handler(&priv->event_handler); 1072 flush_scheduled_work(); 1073 1074 event_failed: 1075 ipoib_dev_cleanup(priv->dev); 1076 1077 device_init_failed: 1078 free_netdev(priv->dev); 1079 1080 alloc_mem_failed: 1081 return ERR_PTR(result); 1082 } 1083 1084 static void ipoib_add_one(struct ib_device *device) 1085 { 1086 struct list_head *dev_list; 1087 struct net_device *dev; 1088 struct ipoib_dev_priv *priv; 1089 int s, e, p; 1090 1091 dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); 1092 if (!dev_list) 1093 return; 1094 1095 INIT_LIST_HEAD(dev_list); 1096 1097 if (device->node_type == IB_NODE_SWITCH) { 1098 s = 0; 1099 e = 0; 1100 } else { 1101 s = 1; 1102 e = device->phys_port_cnt; 1103 } 1104 1105 for (p = s; p <= e; ++p) { 1106 dev = ipoib_add_port("ib%d", device, p); 1107 if (!IS_ERR(dev)) { 1108 priv = netdev_priv(dev); 1109 list_add_tail(&priv->list, dev_list); 1110 } 1111 } 1112 1113 ib_set_client_data(device, &ipoib_client, dev_list); 1114 } 1115 1116 static void ipoib_remove_one(struct ib_device *device) 1117 { 1118 struct ipoib_dev_priv *priv, *tmp; 1119 struct list_head *dev_list; 1120 1121 dev_list = ib_get_client_data(device, &ipoib_client); 1122 1123 list_for_each_entry_safe(priv, tmp, dev_list, list) { 1124 ib_unregister_event_handler(&priv->event_handler); 1125 flush_scheduled_work(); 1126 1127 unregister_netdev(priv->dev); 1128 ipoib_dev_cleanup(priv->dev); 1129 free_netdev(priv->dev); 1130 } 1131 1132 kfree(dev_list); 1133 } 1134 1135 static int __init ipoib_init_module(void) 1136 { 1137 int ret; 1138 1139 ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size); 1140 ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE); 1141 ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE); 1142 1143 ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size); 1144 ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE); 1145 ipoib_sendq_size = max(ipoib_sendq_size, IPOIB_MIN_QUEUE_SIZE); 1146 1147 ret = ipoib_register_debugfs(); 1148 if (ret) 1149 return ret; 1150 1151 /* 1152 * We create our own workqueue mainly because we want to be 1153 * able to flush it when devices are being removed. We can't 1154 * use schedule_work()/flush_scheduled_work() because both 1155 * unregister_netdev() and linkwatch_event take the rtnl lock, 1156 * so flush_scheduled_work() can deadlock during device 1157 * removal. 1158 */ 1159 ipoib_workqueue = create_singlethread_workqueue("ipoib"); 1160 if (!ipoib_workqueue) { 1161 ret = -ENOMEM; 1162 goto err_fs; 1163 } 1164 1165 ret = ib_register_client(&ipoib_client); 1166 if (ret) 1167 goto err_wq; 1168 1169 return 0; 1170 1171 err_wq: 1172 destroy_workqueue(ipoib_workqueue); 1173 1174 err_fs: 1175 ipoib_unregister_debugfs(); 1176 1177 return ret; 1178 } 1179 1180 static void __exit ipoib_cleanup_module(void) 1181 { 1182 ib_unregister_client(&ipoib_client); 1183 ipoib_unregister_debugfs(); 1184 destroy_workqueue(ipoib_workqueue); 1185 } 1186 1187 module_init(ipoib_init_module); 1188 module_exit(ipoib_cleanup_module); 1189