1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0 3 * 4 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 7 * 8 * This software is available to you under a choice of one of two 9 * licenses. You may choose to be licensed under the terms of the GNU 10 * General Public License (GPL) Version 2, available from the file 11 * COPYING in the main directory of this source tree, or the 12 * OpenIB.org BSD license below: 13 * 14 * Redistribution and use in source and binary forms, with or 15 * without modification, are permitted provided that the following 16 * conditions are met: 17 * 18 * - Redistributions of source code must retain the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer. 21 * 22 * - Redistributions in binary form must reproduce the above 23 * copyright notice, this list of conditions and the following 24 * disclaimer in the documentation and/or other materials 25 * provided with the distribution. 26 * 27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 34 * SOFTWARE. 35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 #include "ipoib.h" 41 42 #include <linux/delay.h> 43 #include <linux/completion.h> 44 45 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 46 static int mcast_debug_level = 1; 47 48 module_param(mcast_debug_level, int, 0644); 49 MODULE_PARM_DESC(mcast_debug_level, 50 "Enable multicast debug tracing if > 0"); 51 #endif 52 53 static DEFINE_MUTEX(mcast_mutex); 54 55 struct ipoib_mcast_iter { 56 struct ipoib_dev_priv *priv; 57 union ib_gid mgid; 58 unsigned long created; 59 unsigned int queuelen; 60 unsigned int complete; 61 unsigned int send_only; 62 }; 63 64 static void ipoib_mcast_free(struct ipoib_mcast *mcast) 65 { 66 struct ifnet *dev = mcast->priv->dev; 67 int tx_dropped = 0; 68 69 ipoib_dbg_mcast(mcast->priv, "deleting multicast group %16D\n", 70 mcast->mcmember.mgid.raw, ":"); 71 72 if (mcast->ah) 73 ipoib_put_ah(mcast->ah); 74 75 tx_dropped = mcast->pkt_queue.ifq_len; 76 _IF_DRAIN(&mcast->pkt_queue); /* XXX Locking. */ 77 78 if_inc_counter(dev, IFCOUNTER_OERRORS, tx_dropped); 79 80 kfree(mcast); 81 } 82 83 static struct ipoib_mcast *ipoib_mcast_alloc(struct ipoib_dev_priv *priv, 84 int can_sleep) 85 { 86 struct ipoib_mcast *mcast; 87 88 mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC); 89 if (!mcast) 90 return NULL; 91 92 mcast->priv = priv; 93 mcast->created = jiffies; 94 mcast->backoff = 1; 95 96 INIT_LIST_HEAD(&mcast->list); 97 bzero(&mcast->pkt_queue, sizeof(mcast->pkt_queue)); 98 99 return mcast; 100 } 101 102 static struct ipoib_mcast *__ipoib_mcast_find(struct ipoib_dev_priv *priv, 103 void *mgid) 104 { 105 struct rb_node *n = priv->multicast_tree.rb_node; 106 107 while (n) { 108 struct ipoib_mcast *mcast; 109 int ret; 110 111 mcast = rb_entry(n, struct ipoib_mcast, rb_node); 112 113 ret = memcmp(mgid, mcast->mcmember.mgid.raw, 114 sizeof (union ib_gid)); 115 if (ret < 0) 116 n = n->rb_left; 117 else if (ret > 0) 118 n = n->rb_right; 119 else 120 return mcast; 121 } 122 123 return NULL; 124 } 125 126 static int __ipoib_mcast_add(struct ipoib_dev_priv *priv, 127 struct ipoib_mcast *mcast) 128 { 129 struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL; 130 131 while (*n) { 132 struct ipoib_mcast *tmcast; 133 int ret; 134 135 pn = *n; 136 tmcast = rb_entry(pn, struct ipoib_mcast, rb_node); 137 138 ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw, 139 sizeof (union ib_gid)); 140 if (ret < 0) 141 n = &pn->rb_left; 142 else if (ret > 0) 143 n = &pn->rb_right; 144 else 145 return -EEXIST; 146 } 147 148 rb_link_node(&mcast->rb_node, pn, n); 149 rb_insert_color(&mcast->rb_node, &priv->multicast_tree); 150 151 return 0; 152 } 153 154 static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, 155 struct ib_sa_mcmember_rec *mcmember) 156 { 157 struct ipoib_dev_priv *priv = mcast->priv; 158 struct ifnet *dev = priv->dev; 159 struct ipoib_ah *ah; 160 int ret; 161 int set_qkey = 0; 162 163 mcast->mcmember = *mcmember; 164 165 /* Set the cached Q_Key before we attach if it's the broadcast group */ 166 if (!memcmp(mcast->mcmember.mgid.raw, dev->if_broadcastaddr + 4, 167 sizeof (union ib_gid))) { 168 spin_lock_irq(&priv->lock); 169 if (!priv->broadcast) { 170 spin_unlock_irq(&priv->lock); 171 return -EAGAIN; 172 } 173 priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey); 174 spin_unlock_irq(&priv->lock); 175 priv->tx_wr.remote_qkey = priv->qkey; 176 set_qkey = 1; 177 } 178 179 if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { 180 if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { 181 ipoib_warn(priv, "multicast group %16D already attached\n", 182 mcast->mcmember.mgid.raw, ":"); 183 184 return 0; 185 } 186 187 ret = ipoib_mcast_attach(priv, be16_to_cpu(mcast->mcmember.mlid), 188 &mcast->mcmember.mgid, set_qkey); 189 if (ret < 0) { 190 ipoib_warn(priv, "couldn't attach QP to multicast group %16D\n", 191 mcast->mcmember.mgid.raw, ":"); 192 193 clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags); 194 return ret; 195 } 196 } 197 198 { 199 struct ib_ah_attr av = { 200 .dlid = be16_to_cpu(mcast->mcmember.mlid), 201 .port_num = priv->port, 202 .sl = mcast->mcmember.sl, 203 .ah_flags = IB_AH_GRH, 204 .static_rate = mcast->mcmember.rate, 205 .grh = { 206 .flow_label = be32_to_cpu(mcast->mcmember.flow_label), 207 .hop_limit = mcast->mcmember.hop_limit, 208 .sgid_index = 0, 209 .traffic_class = mcast->mcmember.traffic_class 210 } 211 }; 212 av.grh.dgid = mcast->mcmember.mgid; 213 214 ah = ipoib_create_ah(priv, priv->pd, &av); 215 if (!ah) { 216 ipoib_warn(priv, "ib_address_create failed\n"); 217 } else { 218 spin_lock_irq(&priv->lock); 219 mcast->ah = ah; 220 spin_unlock_irq(&priv->lock); 221 222 ipoib_dbg_mcast(priv, "MGID %16D AV %p, LID 0x%04x, SL %d\n", 223 mcast->mcmember.mgid.raw, ":", 224 mcast->ah->ah, 225 be16_to_cpu(mcast->mcmember.mlid), 226 mcast->mcmember.sl); 227 } 228 } 229 230 /* actually send any queued packets */ 231 while (mcast->pkt_queue.ifq_len) { 232 struct mbuf *mb; 233 _IF_DEQUEUE(&mcast->pkt_queue, mb); 234 mb->m_pkthdr.rcvif = dev; 235 236 if (dev->if_transmit(dev, mb)) 237 ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n"); 238 } 239 240 return 0; 241 } 242 243 static int 244 ipoib_mcast_sendonly_join_complete(int status, 245 struct ib_sa_multicast *multicast) 246 { 247 struct ipoib_mcast *mcast = multicast->context; 248 struct ipoib_dev_priv *priv = mcast->priv; 249 250 /* We trap for port events ourselves. */ 251 if (status == -ENETRESET) 252 return 0; 253 254 if (!status) 255 status = ipoib_mcast_join_finish(mcast, &multicast->rec); 256 257 if (status) { 258 if (mcast->logcount++ < 20) 259 ipoib_dbg_mcast(priv, "multicast join failed for %16D, status %d\n", 260 mcast->mcmember.mgid.raw, ":", status); 261 262 /* Flush out any queued packets */ 263 if_inc_counter(priv->dev, IFCOUNTER_OERRORS, mcast->pkt_queue.ifq_len); 264 _IF_DRAIN(&mcast->pkt_queue); 265 266 /* Clear the busy flag so we try again */ 267 status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, 268 &mcast->flags); 269 } 270 return status; 271 } 272 273 static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) 274 { 275 struct ipoib_dev_priv *priv = mcast->priv; 276 struct ib_sa_mcmember_rec rec = { 277 #if 0 /* Some SMs don't support send-only yet */ 278 .join_state = 4 279 #else 280 .join_state = 1 281 #endif 282 }; 283 int ret = 0; 284 285 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) { 286 ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n"); 287 return -ENODEV; 288 } 289 290 if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) { 291 ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n"); 292 return -EBUSY; 293 } 294 295 rec.mgid = mcast->mcmember.mgid; 296 rec.port_gid = priv->local_gid; 297 rec.pkey = cpu_to_be16(priv->pkey); 298 299 mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, 300 priv->port, &rec, 301 IB_SA_MCMEMBER_REC_MGID | 302 IB_SA_MCMEMBER_REC_PORT_GID | 303 IB_SA_MCMEMBER_REC_PKEY | 304 IB_SA_MCMEMBER_REC_JOIN_STATE, 305 GFP_ATOMIC, 306 ipoib_mcast_sendonly_join_complete, 307 mcast); 308 if (IS_ERR(mcast->mc)) { 309 ret = PTR_ERR(mcast->mc); 310 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 311 ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n", 312 ret); 313 } else { 314 ipoib_dbg_mcast(priv, "no multicast record for %16D, starting join\n", 315 mcast->mcmember.mgid.raw, ":"); 316 } 317 318 return ret; 319 } 320 321 void ipoib_mcast_carrier_on_task(struct work_struct *work) 322 { 323 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, 324 carrier_on_task); 325 struct ib_port_attr attr; 326 327 /* 328 * Take rtnl_lock to avoid racing with ipoib_stop() and 329 * turning the carrier back on while a device is being 330 * removed. 331 */ 332 if (ib_query_port(priv->ca, priv->port, &attr) || 333 attr.state != IB_PORT_ACTIVE) { 334 ipoib_dbg(priv, "Keeping carrier off until IB port is active\n"); 335 return; 336 } 337 if_link_state_change(priv->dev, LINK_STATE_UP); 338 } 339 340 static int ipoib_mcast_join_complete(int status, 341 struct ib_sa_multicast *multicast) 342 { 343 struct ipoib_mcast *mcast = multicast->context; 344 struct ipoib_dev_priv *priv = mcast->priv; 345 346 ipoib_dbg_mcast(priv, "join completion for %16D (status %d)\n", 347 mcast->mcmember.mgid.raw, ":", status); 348 349 /* We trap for port events ourselves. */ 350 if (status == -ENETRESET) 351 return 0; 352 353 if (!status) 354 status = ipoib_mcast_join_finish(mcast, &multicast->rec); 355 356 if (!status) { 357 mcast->backoff = 1; 358 mutex_lock(&mcast_mutex); 359 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 360 queue_delayed_work(ipoib_workqueue, 361 &priv->mcast_task, 0); 362 mutex_unlock(&mcast_mutex); 363 364 /* 365 * Defer carrier on work to ipoib_workqueue to avoid a 366 * deadlock on rtnl_lock here. 367 */ 368 if (mcast == priv->broadcast) 369 queue_work(ipoib_workqueue, &priv->carrier_on_task); 370 371 return 0; 372 } 373 374 if (mcast->logcount++ < 20) { 375 if (status == -ETIMEDOUT || status == -EAGAIN) { 376 ipoib_dbg_mcast(priv, "multicast join failed for %16D, status %d\n", 377 mcast->mcmember.mgid.raw, ":", status); 378 } else { 379 ipoib_warn(priv, "multicast join failed for %16D, status %d\n", 380 mcast->mcmember.mgid.raw, ":", status); 381 } 382 } 383 384 mcast->backoff *= 2; 385 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) 386 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; 387 388 /* Clear the busy flag so we try again */ 389 status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 390 391 mutex_lock(&mcast_mutex); 392 spin_lock_irq(&priv->lock); 393 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 394 queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 395 mcast->backoff * HZ); 396 spin_unlock_irq(&priv->lock); 397 mutex_unlock(&mcast_mutex); 398 399 return status; 400 } 401 402 static void ipoib_mcast_join(struct ipoib_dev_priv *priv, 403 struct ipoib_mcast *mcast, int create) 404 { 405 struct ib_sa_mcmember_rec rec = { 406 .join_state = 1 407 }; 408 ib_sa_comp_mask comp_mask; 409 int ret = 0; 410 411 ipoib_dbg_mcast(priv, "joining MGID %16D\n", 412 mcast->mcmember.mgid.raw, ":"); 413 414 rec.mgid = mcast->mcmember.mgid; 415 rec.port_gid = priv->local_gid; 416 rec.pkey = cpu_to_be16(priv->pkey); 417 418 comp_mask = 419 IB_SA_MCMEMBER_REC_MGID | 420 IB_SA_MCMEMBER_REC_PORT_GID | 421 IB_SA_MCMEMBER_REC_PKEY | 422 IB_SA_MCMEMBER_REC_JOIN_STATE; 423 424 if (create) { 425 comp_mask |= 426 IB_SA_MCMEMBER_REC_QKEY | 427 IB_SA_MCMEMBER_REC_MTU_SELECTOR | 428 IB_SA_MCMEMBER_REC_MTU | 429 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS | 430 IB_SA_MCMEMBER_REC_RATE_SELECTOR | 431 IB_SA_MCMEMBER_REC_RATE | 432 IB_SA_MCMEMBER_REC_SL | 433 IB_SA_MCMEMBER_REC_FLOW_LABEL | 434 IB_SA_MCMEMBER_REC_HOP_LIMIT; 435 436 rec.qkey = priv->broadcast->mcmember.qkey; 437 rec.mtu_selector = IB_SA_EQ; 438 rec.mtu = priv->broadcast->mcmember.mtu; 439 rec.traffic_class = priv->broadcast->mcmember.traffic_class; 440 rec.rate_selector = IB_SA_EQ; 441 rec.rate = priv->broadcast->mcmember.rate; 442 rec.sl = priv->broadcast->mcmember.sl; 443 rec.flow_label = priv->broadcast->mcmember.flow_label; 444 rec.hop_limit = priv->broadcast->mcmember.hop_limit; 445 } 446 447 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 448 mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, 449 &rec, comp_mask, GFP_KERNEL, 450 ipoib_mcast_join_complete, mcast); 451 if (IS_ERR(mcast->mc)) { 452 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 453 ret = PTR_ERR(mcast->mc); 454 ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret); 455 456 mcast->backoff *= 2; 457 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) 458 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; 459 460 mutex_lock(&mcast_mutex); 461 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 462 queue_delayed_work(ipoib_workqueue, 463 &priv->mcast_task, 464 mcast->backoff * HZ); 465 mutex_unlock(&mcast_mutex); 466 } 467 } 468 469 void ipoib_mcast_join_task(struct work_struct *work) 470 { 471 struct ipoib_dev_priv *priv = 472 container_of(work, struct ipoib_dev_priv, mcast_task.work); 473 struct ifnet *dev = priv->dev; 474 struct ib_port_attr attr; 475 476 ipoib_dbg_mcast(priv, "Running join task. flags 0x%lX\n", priv->flags); 477 478 if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) 479 return; 480 481 if (ib_query_port(priv->ca, priv->port, &attr) || 482 attr.state != IB_PORT_ACTIVE) { 483 ipoib_dbg(priv, "%s: port state is not ACTIVE (state = %d) suspend task.\n", 484 __func__, attr.state); 485 return; 486 } 487 488 if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid, NULL)) 489 ipoib_warn(priv, "ib_query_gid() failed\n"); 490 else 491 memcpy(IF_LLADDR(dev) + 4, priv->local_gid.raw, sizeof (union ib_gid)); 492 493 { 494 struct ib_port_attr attr; 495 496 if (!ib_query_port(priv->ca, priv->port, &attr)) 497 priv->local_lid = attr.lid; 498 else 499 ipoib_warn(priv, "ib_query_port failed\n"); 500 } 501 502 if (!priv->broadcast) { 503 struct ipoib_mcast *broadcast; 504 505 if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) 506 return; 507 508 broadcast = ipoib_mcast_alloc(priv, 1); 509 if (!broadcast) { 510 ipoib_warn(priv, "failed to allocate broadcast group\n"); 511 mutex_lock(&mcast_mutex); 512 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 513 queue_delayed_work(ipoib_workqueue, 514 &priv->mcast_task, HZ); 515 mutex_unlock(&mcast_mutex); 516 return; 517 } 518 519 spin_lock_irq(&priv->lock); 520 memcpy(broadcast->mcmember.mgid.raw, dev->if_broadcastaddr + 4, 521 sizeof (union ib_gid)); 522 priv->broadcast = broadcast; 523 524 __ipoib_mcast_add(priv, priv->broadcast); 525 spin_unlock_irq(&priv->lock); 526 } 527 528 if (priv->broadcast && 529 !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { 530 if (priv->broadcast && 531 !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) 532 ipoib_mcast_join(priv, priv->broadcast, 0); 533 return; 534 } 535 536 while (1) { 537 struct ipoib_mcast *mcast = NULL; 538 539 spin_lock_irq(&priv->lock); 540 list_for_each_entry(mcast, &priv->multicast_list, list) { 541 if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) 542 && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) 543 && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { 544 /* Found the next unjoined group */ 545 break; 546 } 547 } 548 spin_unlock_irq(&priv->lock); 549 550 if (&mcast->list == &priv->multicast_list) { 551 /* All done */ 552 break; 553 } 554 555 ipoib_mcast_join(priv, mcast, 1); 556 return; 557 } 558 559 spin_lock_irq(&priv->lock); 560 if (priv->broadcast) 561 priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu)); 562 else 563 priv->mcast_mtu = priv->admin_mtu; 564 spin_unlock_irq(&priv->lock); 565 566 if (!ipoib_cm_admin_enabled(priv)) 567 ipoib_change_mtu(priv, min(priv->mcast_mtu, priv->admin_mtu)); 568 569 ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n"); 570 571 clear_bit(IPOIB_MCAST_RUN, &priv->flags); 572 } 573 574 int ipoib_mcast_start_thread(struct ipoib_dev_priv *priv) 575 { 576 ipoib_dbg_mcast(priv, "starting multicast thread flags 0x%lX\n", 577 priv->flags); 578 579 mutex_lock(&mcast_mutex); 580 if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) 581 queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0); 582 mutex_unlock(&mcast_mutex); 583 584 return 0; 585 } 586 587 int ipoib_mcast_stop_thread(struct ipoib_dev_priv *priv, int flush) 588 { 589 590 ipoib_dbg_mcast(priv, "stopping multicast thread\n"); 591 592 mutex_lock(&mcast_mutex); 593 clear_bit(IPOIB_MCAST_RUN, &priv->flags); 594 cancel_delayed_work(&priv->mcast_task); 595 mutex_unlock(&mcast_mutex); 596 597 if (flush) 598 flush_workqueue(ipoib_workqueue); 599 600 return 0; 601 } 602 603 static int ipoib_mcast_leave(struct ipoib_dev_priv *priv, struct ipoib_mcast *mcast) 604 { 605 int ret = 0; 606 607 if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) 608 ib_sa_free_multicast(mcast->mc); 609 610 if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { 611 ipoib_dbg_mcast(priv, "leaving MGID %16D\n", 612 mcast->mcmember.mgid.raw, ":"); 613 614 /* Remove ourselves from the multicast group */ 615 ret = ib_detach_mcast(priv->qp, &mcast->mcmember.mgid, 616 be16_to_cpu(mcast->mcmember.mlid)); 617 if (ret) 618 ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret); 619 } 620 621 return 0; 622 } 623 624 void 625 ipoib_mcast_send(struct ipoib_dev_priv *priv, void *mgid, struct mbuf *mb) 626 { 627 struct ifnet *dev = priv->dev; 628 struct ipoib_mcast *mcast; 629 630 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) || 631 !priv->broadcast || 632 !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { 633 if_inc_counter(dev, IFCOUNTER_OERRORS, 1); 634 m_freem(mb); 635 return; 636 } 637 638 mcast = __ipoib_mcast_find(priv, mgid); 639 if (!mcast) { 640 /* Let's create a new send only group now */ 641 ipoib_dbg_mcast(priv, "setting up send only multicast group for %16D\n", 642 mgid, ":"); 643 644 mcast = ipoib_mcast_alloc(priv, 0); 645 if (!mcast) { 646 ipoib_warn(priv, "unable to allocate memory for " 647 "multicast structure\n"); 648 if_inc_counter(dev, IFCOUNTER_OERRORS, 1); 649 m_freem(mb); 650 goto out; 651 } 652 653 set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags); 654 memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid)); 655 __ipoib_mcast_add(priv, mcast); 656 list_add_tail(&mcast->list, &priv->multicast_list); 657 } 658 659 if (!mcast->ah) { 660 if (mcast->pkt_queue.ifq_len < IPOIB_MAX_MCAST_QUEUE) { 661 _IF_ENQUEUE(&mcast->pkt_queue, mb); 662 } else { 663 if_inc_counter(dev, IFCOUNTER_OERRORS, 1); 664 m_freem(mb); 665 } 666 667 if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) 668 ipoib_dbg_mcast(priv, "no address vector, " 669 "but multicast join already started\n"); 670 else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) 671 ipoib_mcast_sendonly_join(mcast); 672 673 /* 674 * If lookup completes between here and out:, don't 675 * want to send packet twice. 676 */ 677 mcast = NULL; 678 } 679 680 out: 681 if (mcast && mcast->ah) 682 ipoib_send(priv, mb, mcast->ah, IB_MULTICAST_QPN); 683 } 684 685 void ipoib_mcast_dev_flush(struct ipoib_dev_priv *priv) 686 { 687 LIST_HEAD(remove_list); 688 struct ipoib_mcast *mcast, *tmcast; 689 unsigned long flags; 690 691 ipoib_dbg_mcast(priv, "flushing multicast list\n"); 692 693 spin_lock_irqsave(&priv->lock, flags); 694 695 list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) { 696 list_del(&mcast->list); 697 rb_erase(&mcast->rb_node, &priv->multicast_tree); 698 list_add_tail(&mcast->list, &remove_list); 699 } 700 701 if (priv->broadcast) { 702 rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree); 703 list_add_tail(&priv->broadcast->list, &remove_list); 704 priv->broadcast = NULL; 705 } 706 707 spin_unlock_irqrestore(&priv->lock, flags); 708 709 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { 710 ipoib_mcast_leave(priv, mcast); 711 ipoib_mcast_free(mcast); 712 } 713 } 714 715 static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen, 716 const u8 *broadcast) 717 { 718 if (addrlen != INFINIBAND_ALEN) 719 return 0; 720 /* reserved QPN, prefix, scope */ 721 if (memcmp(addr, broadcast, 6)) 722 return 0; 723 /* signature lower, pkey */ 724 if (memcmp(addr + 7, broadcast + 7, 3)) 725 return 0; 726 return 1; 727 } 728 729 void ipoib_mcast_restart_task(struct work_struct *work) 730 { 731 struct ipoib_dev_priv *priv = 732 container_of(work, struct ipoib_dev_priv, restart_task); 733 ipoib_mcast_restart(priv); 734 } 735 736 void ipoib_mcast_restart(struct ipoib_dev_priv *priv) 737 { 738 struct ifnet *dev = priv->dev; 739 struct ifmultiaddr *ifma; 740 struct ipoib_mcast *mcast, *tmcast; 741 LIST_HEAD(remove_list); 742 struct ib_sa_mcmember_rec rec; 743 int addrlen; 744 745 ipoib_dbg_mcast(priv, "restarting multicast task flags 0x%lX\n", 746 priv->flags); 747 748 ipoib_mcast_stop_thread(priv, 0); 749 750 if_maddr_rlock(dev); 751 spin_lock(&priv->lock); 752 753 /* 754 * Unfortunately, the networking core only gives us a list of all of 755 * the multicast hardware addresses. We need to figure out which ones 756 * are new and which ones have been removed 757 */ 758 759 /* Clear out the found flag */ 760 list_for_each_entry(mcast, &priv->multicast_list, list) 761 clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags); 762 763 /* Mark all of the entries that are found or don't exist */ 764 765 766 CK_STAILQ_FOREACH(ifma, &dev->if_multiaddrs, ifma_link) { 767 union ib_gid mgid; 768 uint8_t *addr; 769 770 if (ifma->ifma_addr->sa_family != AF_LINK) 771 continue; 772 addr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 773 addrlen = ((struct sockaddr_dl *)ifma->ifma_addr)->sdl_alen; 774 if (!ipoib_mcast_addr_is_valid(addr, addrlen, 775 dev->if_broadcastaddr)) 776 continue; 777 778 memcpy(mgid.raw, addr + 4, sizeof mgid); 779 780 mcast = __ipoib_mcast_find(priv, &mgid); 781 if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { 782 struct ipoib_mcast *nmcast; 783 784 /* ignore group which is directly joined by userspace */ 785 if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) && 786 !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) { 787 ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %16D\n", 788 mgid.raw, ":"); 789 continue; 790 } 791 792 /* Not found or send-only group, let's add a new entry */ 793 ipoib_dbg_mcast(priv, "adding multicast entry for mgid %16D\n", 794 mgid.raw, ":"); 795 796 nmcast = ipoib_mcast_alloc(priv, 0); 797 if (!nmcast) { 798 ipoib_warn(priv, "unable to allocate memory for multicast structure\n"); 799 continue; 800 } 801 802 set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags); 803 804 nmcast->mcmember.mgid = mgid; 805 806 if (mcast) { 807 /* Destroy the send only entry */ 808 list_move_tail(&mcast->list, &remove_list); 809 810 rb_replace_node(&mcast->rb_node, 811 &nmcast->rb_node, 812 &priv->multicast_tree); 813 } else 814 __ipoib_mcast_add(priv, nmcast); 815 816 list_add_tail(&nmcast->list, &priv->multicast_list); 817 } 818 819 if (mcast) 820 set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags); 821 } 822 823 /* Remove all of the entries don't exist anymore */ 824 list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) { 825 if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) && 826 !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { 827 ipoib_dbg_mcast(priv, "deleting multicast group %16D\n", 828 mcast->mcmember.mgid.raw, ":"); 829 830 rb_erase(&mcast->rb_node, &priv->multicast_tree); 831 832 /* Move to the remove list */ 833 list_move_tail(&mcast->list, &remove_list); 834 } 835 } 836 837 spin_unlock(&priv->lock); 838 if_maddr_runlock(dev); 839 840 /* We have to cancel outside of the spinlock */ 841 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { 842 ipoib_mcast_leave(mcast->priv, mcast); 843 ipoib_mcast_free(mcast); 844 } 845 846 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) 847 ipoib_mcast_start_thread(priv); 848 } 849 850 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 851 852 struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct ipoib_dev_priv *priv) 853 { 854 struct ipoib_mcast_iter *iter; 855 856 iter = kmalloc(sizeof *iter, GFP_KERNEL); 857 if (!iter) 858 return NULL; 859 860 iter->priv = priv; 861 memset(iter->mgid.raw, 0, 16); 862 863 if (ipoib_mcast_iter_next(iter)) { 864 kfree(iter); 865 return NULL; 866 } 867 868 return iter; 869 } 870 871 int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter) 872 { 873 struct ipoib_dev_priv *priv = iter->priv; 874 struct rb_node *n; 875 struct ipoib_mcast *mcast; 876 int ret = 1; 877 878 spin_lock_irq(&priv->lock); 879 880 n = rb_first(&priv->multicast_tree); 881 882 while (n) { 883 mcast = rb_entry(n, struct ipoib_mcast, rb_node); 884 885 if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw, 886 sizeof (union ib_gid)) < 0) { 887 iter->mgid = mcast->mcmember.mgid; 888 iter->created = mcast->created; 889 iter->queuelen = mcast->pkt_queue.ifq_len; 890 iter->complete = !!mcast->ah; 891 iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY)); 892 893 ret = 0; 894 895 break; 896 } 897 898 n = rb_next(n); 899 } 900 901 spin_unlock_irq(&priv->lock); 902 903 return ret; 904 } 905 906 void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter, 907 union ib_gid *mgid, 908 unsigned long *created, 909 unsigned int *queuelen, 910 unsigned int *complete, 911 unsigned int *send_only) 912 { 913 *mgid = iter->mgid; 914 *created = iter->created; 915 *queuelen = iter->queuelen; 916 *complete = iter->complete; 917 *send_only = iter->send_only; 918 } 919 920 #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */ 921