/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_multicast.c 1362 2004-12-18 15:56:29Z roland $
 */

#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>

#include <net/dst.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int mcast_debug_level;

module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
		 "Enable multicast debug tracing if > 0");
#endif

static DEFINE_MUTEX(mcast_mutex);

/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
struct ipoib_mcast {
	struct ib_sa_mcmember_rec mcmember;
	struct ib_sa_multicast   *mc;
	struct ipoib_ah          *ah;

	struct rb_node    rb_node;
	struct list_head  list;

	unsigned long created;
	unsigned long backoff;

	unsigned long flags;
	unsigned char logcount;

	struct list_head  neigh_list;

	struct sk_buff_head pkt_queue;

	struct net_device *dev;
};

struct ipoib_mcast_iter {
	struct net_device *dev;
	union ib_gid       mgid;
	unsigned long      created;
	unsigned int       queuelen;
	unsigned int       complete;
	unsigned int       send_only;
};
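/*
 * Tear down a multicast group entry: drop the address handle reference
 * held by each neighbour still pointing at the group, free any packets
 * queued while the join was pending (accounting them as TX drops), and
 * finally free the group structure itself.
 */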
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tmp;
	unsigned long flags;
	int tx_dropped = 0;

	ipoib_dbg_mcast(netdev_priv(dev),
			"deleting multicast group " IPOIB_GID_FMT "\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that mcast->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);
		ipoib_neigh_free(dev, neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (mcast->ah)
		ipoib_put_ah(mcast->ah);

	while (!skb_queue_empty(&mcast->pkt_queue)) {
		++tx_dropped;
		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
	}

	spin_lock_irqsave(&priv->tx_lock, flags);
	priv->stats.tx_dropped += tx_dropped;
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	kfree(mcast);
}

static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
					     int can_sleep)
{
	struct ipoib_mcast *mcast;

	mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!mcast)
		return NULL;

	mcast->dev = dev;
	mcast->created = jiffies;
	mcast->backoff = 1;

	INIT_LIST_HEAD(&mcast->list);
	INIT_LIST_HEAD(&mcast->neigh_list);
	skb_queue_head_init(&mcast->pkt_queue);

	return mcast;
}
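/*
 * rb-tree helpers: multicast groups live in priv->multicast_tree keyed
 * by raw MGID, so transmit-path lookups cost O(log n).  Callers must
 * hold priv->lock.
 */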
static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->multicast_tree.rb_node;

	while (n) {
		struct ipoib_mcast *mcast;
		int ret;

		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		ret = memcmp(mgid, mcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return mcast;
	}

	return NULL;
}

static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;

	while (*n) {
		struct ipoib_mcast *tmcast;
		int ret;

		pn = *n;
		tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);

		ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &priv->multicast_tree);

	return 0;
}
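/*
 * Called when the SA returns a member record for a join we initiated:
 * cache the broadcast Q_Key, attach our QP to the group (unless it is
 * send-only), build an address handle from the returned parameters and
 * transmit any packets that were queued while the join was in flight.
 */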
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
				   struct ib_sa_mcmember_rec *mcmember)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah;
	int ret;

	mcast->mcmember = *mcmember;

	/* Set the cached Q_Key before we attach if it's the broadcast group */
	if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		    sizeof (union ib_gid))) {
		priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
		priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
	}

	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
		if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
			ipoib_warn(priv, "multicast group " IPOIB_GID_FMT
				   " already attached\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid));

			return 0;
		}

		ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
					 &mcast->mcmember.mgid);
		if (ret < 0) {
			ipoib_warn(priv, "couldn't attach QP to multicast group "
				   IPOIB_GID_FMT "\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid));

			clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
			return ret;
		}
	}

	{
		struct ib_ah_attr av = {
			.dlid	       = be16_to_cpu(mcast->mcmember.mlid),
			.port_num      = priv->port,
			.sl	       = mcast->mcmember.sl,
			.ah_flags      = IB_AH_GRH,
			.static_rate   = mcast->mcmember.rate,
			.grh	       = {
				.flow_label    = be32_to_cpu(mcast->mcmember.flow_label),
				.hop_limit     = mcast->mcmember.hop_limit,
				.sgid_index    = 0,
				.traffic_class = mcast->mcmember.traffic_class
			}
		};
		av.grh.dgid = mcast->mcmember.mgid;

		ah = ipoib_create_ah(dev, priv->pd, &av);
		if (!ah) {
			ipoib_warn(priv, "ipoib_create_ah failed\n");
		} else {
			spin_lock_irq(&priv->lock);
			mcast->ah = ah;
			spin_unlock_irq(&priv->lock);

			ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT
					" AV %p, LID 0x%04x, SL %d\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid),
					mcast->ah->ah,
					be16_to_cpu(mcast->mcmember.mlid),
					mcast->mcmember.sl);
		}
	}

	/* Actually send any queued packets */
	spin_lock_irq(&priv->tx_lock);
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
		spin_unlock_irq(&priv->tx_lock);

		skb->dev = dev;

		if (!skb->dst || !skb->dst->neighbour) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof (struct ipoib_pseudoheader));
		}

		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
		spin_lock_irq(&priv->tx_lock);
	}
	spin_unlock_irq(&priv->tx_lock);

	return 0;
}
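/*
 * Send-only join path, used for groups we transmit to but never need
 * to receive from.  On failure the queued packets are dropped and the
 * busy flag is cleared so that a later transmit can retry the join.
 */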
static int
ipoib_mcast_sendonly_join_complete(int status,
				   struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* We trap for port events ourselves. */
	if (status == -ENETRESET)
		return 0;

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (status) {
		if (mcast->logcount++ < 20)
			ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for "
					IPOIB_GID_FMT ", status %d\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid), status);

		/* Flush out any queued packets */
		spin_lock_irq(&priv->tx_lock);
		while (!skb_queue_empty(&mcast->pkt_queue)) {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
		}
		spin_unlock_irq(&priv->tx_lock);

		/* Clear the busy flag so we try again */
		status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
					    &mcast->flags);
	}
	return status;
}

static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
#if 0		/* Some SMs don't support send-only yet */
		.join_state = 4
#else
		.join_state = 1
#endif
	};
	int ret = 0;

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
		return -ENODEV;
	}

	if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
		return -EBUSY;
	}

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = cpu_to_be16(priv->pkey);

	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
					 priv->port, &rec,
					 IB_SA_MCMEMBER_REC_MGID	|
					 IB_SA_MCMEMBER_REC_PORT_GID	|
					 IB_SA_MCMEMBER_REC_PKEY	|
					 IB_SA_MCMEMBER_REC_JOIN_STATE,
					 GFP_ATOMIC,
					 ipoib_mcast_sendonly_join_complete,
					 mcast);
	if (IS_ERR(mcast->mc)) {
		ret = PTR_ERR(mcast->mc);
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
			   ret);
	} else {
		ipoib_dbg_mcast(priv, "no multicast record for " IPOIB_GID_FMT
				", starting join\n",
				IPOIB_GID_ARG(mcast->mcmember.mgid));
	}

	return ret;
}
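/*
 * Full join path.  Joins are driven one at a time from the mcast_task
 * workqueue; on failure the completion handler doubles the per-group
 * backoff (capped at IPOIB_MAX_BACKOFF_SECONDS) before rescheduling
 * the task.
 */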
static int ipoib_mcast_join_complete(int status,
				     struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "join completion for " IPOIB_GID_FMT
			" (status %d)\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid), status);

	/* We trap for port events ourselves. */
	if (status == -ENETRESET)
		return 0;

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (!status) {
		mcast->backoff = 1;
		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task, 0);
		mutex_unlock(&mcast_mutex);

		if (mcast == priv->broadcast)
			netif_carrier_on(dev);

		return 0;
	}

	if (mcast->logcount++ < 20) {
		if (status == -ETIMEDOUT) {
			ipoib_dbg_mcast(priv, "multicast join failed for " IPOIB_GID_FMT
					", status %d\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid),
					status);
		} else {
			ipoib_warn(priv, "multicast join failed for "
				   IPOIB_GID_FMT ", status %d\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid),
				   status);
		}
	}

	mcast->backoff *= 2;
	if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
		mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

	/* Clear the busy flag so we try again */
	status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);

	mutex_lock(&mcast_mutex);
	spin_lock_irq(&priv->lock);
	if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
				   mcast->backoff * HZ);
	spin_unlock_irq(&priv->lock);
	mutex_unlock(&mcast_mutex);

	return status;
}

static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
			     int create)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1
	};
	ib_sa_comp_mask comp_mask;
	int ret = 0;

	ipoib_dbg_mcast(priv, "joining MGID " IPOIB_GID_FMT "\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid));

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = cpu_to_be16(priv->pkey);

	comp_mask =
		IB_SA_MCMEMBER_REC_MGID		|
		IB_SA_MCMEMBER_REC_PORT_GID	|
		IB_SA_MCMEMBER_REC_PKEY		|
		IB_SA_MCMEMBER_REC_JOIN_STATE;

	if (create) {
		comp_mask |=
			IB_SA_MCMEMBER_REC_QKEY			|
			IB_SA_MCMEMBER_REC_MTU_SELECTOR		|
			IB_SA_MCMEMBER_REC_MTU			|
			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS	|
			IB_SA_MCMEMBER_REC_RATE_SELECTOR	|
			IB_SA_MCMEMBER_REC_RATE			|
			IB_SA_MCMEMBER_REC_SL			|
			IB_SA_MCMEMBER_REC_FLOW_LABEL		|
			IB_SA_MCMEMBER_REC_HOP_LIMIT;

		rec.qkey	  = priv->broadcast->mcmember.qkey;
		rec.mtu_selector  = IB_SA_EQ;
		rec.mtu		  = priv->broadcast->mcmember.mtu;
		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
		rec.rate_selector = IB_SA_EQ;
		rec.rate	  = priv->broadcast->mcmember.rate;
		rec.sl		  = priv->broadcast->mcmember.sl;
		rec.flow_label	  = priv->broadcast->mcmember.flow_label;
		rec.hop_limit	  = priv->broadcast->mcmember.hop_limit;
	}

	set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
					 &rec, comp_mask, GFP_KERNEL,
					 ipoib_mcast_join_complete, mcast);
	if (IS_ERR(mcast->mc)) {
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ret = PTR_ERR(mcast->mc);
		ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);

		mcast->backoff *= 2;
		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task,
					   mcast->backoff * HZ);
		mutex_unlock(&mcast_mutex);
	}
}
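/*
 * Workqueue handler that (re)joins our multicast groups: refresh the
 * local GID and LID, create and join the broadcast group first (its
 * member record seeds the parameters for every other join), then walk
 * multicast_list joining one unjoined group per invocation until none
 * remain.
 */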
void ipoib_mcast_join_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, mcast_task.work);
	struct net_device *dev = priv->dev;

	if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
		return;

	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
		ipoib_warn(priv, "ib_query_gid() failed\n");
	else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	{
		struct ib_port_attr attr;

		if (!ib_query_port(priv->ca, priv->port, &attr))
			priv->local_lid = attr.lid;
		else
			ipoib_warn(priv, "ib_query_port failed\n");
	}

	if (!priv->broadcast) {
		struct ipoib_mcast *broadcast;

		broadcast = ipoib_mcast_alloc(dev, 1);
		if (!broadcast) {
			ipoib_warn(priv, "failed to allocate broadcast group\n");
			mutex_lock(&mcast_mutex);
			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
				queue_delayed_work(ipoib_workqueue,
						   &priv->mcast_task, HZ);
			mutex_unlock(&mcast_mutex);
			return;
		}

		spin_lock_irq(&priv->lock);
		memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		       sizeof (union ib_gid));
		priv->broadcast = broadcast;

		__ipoib_mcast_add(dev, priv->broadcast);
		spin_unlock_irq(&priv->lock);
	}

	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
			ipoib_mcast_join(dev, priv->broadcast, 0);
		return;
	}

	while (1) {
		struct ipoib_mcast *mcast = NULL;

		spin_lock_irq(&priv->lock);
		list_for_each_entry(mcast, &priv->multicast_list, list) {
			if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
				/* Found the next unjoined group */
				break;
			}
		}
		spin_unlock_irq(&priv->lock);

		if (&mcast->list == &priv->multicast_list) {
			/* All done */
			break;
		}

		ipoib_mcast_join(dev, mcast, 1);
		return;
	}

	priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
		IPOIB_ENCAP_LEN;

	if (!ipoib_cm_admin_enabled(dev))
		dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");

	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
}

int ipoib_mcast_start_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "starting multicast thread\n");

	mutex_lock(&mcast_mutex);
	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
	mutex_unlock(&mcast_mutex);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_MCAST_STARTED, &priv->flags);
	spin_unlock_irq(&priv->lock);

	return 0;
}

int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

	spin_lock_irq(&priv->lock);
	clear_bit(IPOIB_MCAST_STARTED, &priv->flags);
	spin_unlock_irq(&priv->lock);

	mutex_lock(&mcast_mutex);
	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	cancel_delayed_work(&priv->mcast_task);
	mutex_unlock(&mcast_mutex);

	if (flush)
		flush_workqueue(ipoib_workqueue);

	return 0;
}
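/*
 * ipoib_mcast_leave detaches the QP from a group and frees its SA
 * multicast handle.  ipoib_mcast_send is the transmit-path hook: it
 * looks up (or creates a send-only entry for) the destination group
 * and queues packets until an address handle becomes available.
 */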
static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "leaving MGID " IPOIB_GID_FMT "\n",
				IPOIB_GID_ARG(mcast->mcmember.mgid));

		/* Remove ourselves from the multicast group */
		ret = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid),
					 &mcast->mcmember.mgid);
		if (ret)
			ipoib_warn(priv, "ipoib_mcast_detach failed (result = %d)\n", ret);
	}

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
		ib_sa_free_multicast(mcast->mc);

	return 0;
}

void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_mcast *mcast;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags)	||
	    !priv->broadcast					||
	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	mcast = __ipoib_mcast_find(dev, mgid);
	if (!mcast) {
		/* Let's create a new send only group now */
		ipoib_dbg_mcast(priv, "setting up send only multicast group for "
				IPOIB_GID_FMT "\n", IPOIB_GID_RAW_ARG(mgid));

		mcast = ipoib_mcast_alloc(dev, 0);
		if (!mcast) {
			ipoib_warn(priv, "unable to allocate memory for "
				   "multicast structure\n");
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			goto out;
		}

		set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
		memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
		__ipoib_mcast_add(dev, mcast);
		list_add_tail(&mcast->list, &priv->multicast_list);
	}

	if (!mcast->ah) {
		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
			skb_queue_tail(&mcast->pkt_queue, skb);
		else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
			ipoib_dbg_mcast(priv, "no address vector, "
					"but multicast join already started\n");
		else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
			ipoib_mcast_sendonly_join(mcast);

		/*
		 * If the lookup completes between here and out:, we don't
		 * want to send the packet twice.
		 */
		mcast = NULL;
	}

out:
	if (mcast && mcast->ah) {
		if (skb->dst &&
		    skb->dst->neighbour &&
		    !*to_ipoib_neigh(skb->dst->neighbour)) {
			struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour);

			if (neigh) {
				kref_get(&mcast->ah->ref);
				neigh->ah = mcast->ah;
				list_add_tail(&neigh->list, &mcast->neigh_list);
			}
		}

		ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
	}

unlock:
	spin_unlock(&priv->lock);
}

void ipoib_mcast_dev_flush(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast;
	unsigned long flags;

	ipoib_dbg_mcast(priv, "flushing multicast list\n");

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		list_del(&mcast->list);
		rb_erase(&mcast->rb_node, &priv->multicast_tree);
		list_add_tail(&mcast->list, &remove_list);
	}

	if (priv->broadcast) {
		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
		list_add_tail(&priv->broadcast->list, &remove_list);
		priv->broadcast = NULL;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(dev, mcast);
		ipoib_mcast_free(mcast);
	}
}

void ipoib_mcast_restart_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, restart_task);
	struct net_device *dev = priv->dev;
	struct dev_mc_list *mclist;
	struct ipoib_mcast *mcast, *tmcast;
	LIST_HEAD(remove_list);
	unsigned long flags;

	ipoib_dbg_mcast(priv, "restarting multicast task\n");

	ipoib_mcast_stop_thread(dev, 0);

	local_irq_save(flags);
	netif_tx_lock(dev);
	spin_lock(&priv->lock);

	/*
	 * Unfortunately, the networking core only gives us a list of all of
	 * the multicast hardware addresses.  We need to figure out which ones
	 * are new and which ones have been removed.
	 */

	/* Clear out the found flag */
	list_for_each_entry(mcast, &priv->multicast_list, list)
		clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

	/* Mark all of the entries that are found or don't exist */
	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
		union ib_gid mgid;

		memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);

		/* Add in the P_Key */
		mgid.raw[4] = (priv->pkey >> 8) & 0xff;
		mgid.raw[5] = priv->pkey & 0xff;

		mcast = __ipoib_mcast_find(dev, &mgid);
		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			struct ipoib_mcast *nmcast;

			/* Not found or send-only group, let's add a new entry */
			ipoib_dbg_mcast(priv, "adding multicast entry for mgid "
					IPOIB_GID_FMT "\n", IPOIB_GID_ARG(mgid));

			nmcast = ipoib_mcast_alloc(dev, 0);
			if (!nmcast) {
				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
				continue;
			}

			set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

			nmcast->mcmember.mgid = mgid;

			if (mcast) {
				/* Destroy the send only entry */
				list_move_tail(&mcast->list, &remove_list);

				rb_replace_node(&mcast->rb_node,
						&nmcast->rb_node,
						&priv->multicast_tree);
			} else
				__ipoib_mcast_add(dev, nmcast);

			list_add_tail(&nmcast->list, &priv->multicast_list);
		}

		if (mcast)
			set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	}

	/* Remove all of the entries that don't exist anymore */
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
		    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			ipoib_dbg_mcast(priv, "deleting multicast group " IPOIB_GID_FMT "\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid));

			rb_erase(&mcast->rb_node, &priv->multicast_tree);

			/* Move to the remove list */
			list_move_tail(&mcast->list, &remove_list);
		}
	}

	spin_unlock(&priv->lock);
	netif_tx_unlock(dev);
	local_irq_restore(flags);

	/* We have to cancel outside of the spinlock */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(mcast->dev, mcast);
		ipoib_mcast_free(mcast);
	}

	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		ipoib_mcast_start_thread(dev);
}
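/*
 * Debug iterator (used by the ipoib_fs debugfs code): walks the
 * multicast rb-tree in MGID order, copying one group's state per call
 * so the caller never holds priv->lock across reads.
 */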
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
	struct ipoib_mcast_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->mgid.raw, 0, 16);

	if (ipoib_mcast_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_mcast *mcast;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->multicast_tree);

	while (n) {
		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->mgid = mcast->mcmember.mgid;
			iter->created = mcast->created;
			iter->queuelen = skb_queue_len(&mcast->pkt_queue);
			iter->complete = !!mcast->ah;
			iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));

			ret = 0;

			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
			   union ib_gid *mgid,
			   unsigned long *created,
			   unsigned int *queuelen,
			   unsigned int *complete,
			   unsigned int *send_only)
{
	*mgid      = iter->mgid;
	*created   = iter->created;
	*queuelen  = iter->queuelen;
	*complete  = iter->complete;
	*send_only = iter->send_only;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */