/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "name_distr.h"
#include "core.h"

#define MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT	20	/* bcast link window size (default) */

const char tipc_bclink_name[] = "broadcast-link";

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);

static void tipc_bclink_lock(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->bclink->lock);
}

static void tipc_bclink_unlock(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_unlock_bh(&tn->bclink->lock);
}

void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
}

uint tipc_bclink_get_mtu(void)
{
	return MAX_PKT_DEFAULT_MCAST;
}

static u32 bcbuf_acks(struct sk_buff *buf)
{
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}
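
/*
 * The bcbuf_*() helpers above reuse the skb control block's 'handle' field
 * as a plain reference count: an outgoing broadcast buffer is tagged with
 * the number of nodes that must acknowledge it (see tipc_bcbearer_send()),
 * each acknowledging node decrements the count via bcbuf_decr_acks(), and
 * tipc_bclink_acknowledge() unlinks and frees the buffer once the count
 * reaches zero. E.g. with three peers: bcbuf_set_acks(buf, 3), three
 * decrements, then bcbuf_acks(buf) == 0 triggers release.
 */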

void tipc_bclink_add_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}

void tipc_bclink_remove_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);

	/* Last node? => reset backlog queue */
	if (!tn->bclink->bcast_nodes.count)
		tipc_link_purge_backlog(&tn->bclink->link);

	tipc_bclink_unlock(net);
}

static void bclink_set_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
}

u32 tipc_bclink_get_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcl->silent_intv_cnt;
}

static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
						seqno : node->bclink.last_sent;
}

/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bclink->retransmit_to;
}

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bclink_lock locked
 */
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
	struct sk_buff *skb;
	struct tipc_link *bcl = tn->bcl;

	skb_queue_walk(&bcl->transmq, skb) {
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
			break;
		}
	}
}

/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bclink_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *skb, *tmp;
	unsigned int released = 0;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;

	tipc_bclink_lock(net);

	/* Bail out if tx queue is empty (no clean up is required) */
	skb = skb_peek(&tn->bcl->transmq);
	if (!skb)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (tn->bclink->bcast_nodes.count)
			acked = tn->bcl->silent_intv_cnt;
		else
			acked = tn->bcl->snd_nxt;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(tn->bcl->silent_intv_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&tn->bcl->transmq, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}

	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		bcbuf_decr_acks(skb);
		bclink_set_last_sent(net);
		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &tn->bcl->transmq);
			kfree_skb(skb);
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
	}
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
exit:
	tipc_bclink_unlock(net);
}

/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * RCU and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
				   u32 last_sent)
{
	struct sk_buff *buf;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */
	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock(net);
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}

/**
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 */
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);
	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;
	tipc_node_unlock(n_ptr);
	tipc_node_put(n_ptr);
}

/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
 *                    and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_bclink *bclink = tn->bclink;
	int rc = 0;
	int bc = 0;
	struct sk_buff *skb;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;

	/* Prepare clone of message for local node */
	skb = tipc_msg_reassemble(list);
	if (unlikely(!skb)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}
	/* Broadcast to all nodes */
	if (likely(bclink)) {
		tipc_bclink_lock(net);
		if (likely(bclink->bcast_nodes.count)) {
			rc = __tipc_link_xmit(net, bcl, list);
			if (likely(!rc)) {
				u32 len = skb_queue_len(&bcl->transmq);

				bclink_set_last_sent(net);
				bcl->stats.queue_sz_counts++;
				bcl->stats.accu_queue_sz += len;
			}
			bc = 1;
		}
		tipc_bclink_unlock(net);
	}

	if (unlikely(!bc))
		__skb_queue_purge(list);

	if (unlikely(rc)) {
		kfree_skb(skb);
		return rc;
	}
	/* Deliver message clone */
	__skb_queue_head_init(&arrvq);
	skb_queue_head_init(&inputq);
	__skb_queue_tail(&arrvq, skb);
	tipc_sk_mcast_rcv(net, &arrvq, &inputq);
	return rc;
}
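
/*
 * bclink_accept_pkt() below also unicasts an acknowledgement once per
 * TIPC_MIN_LINK_WIN accepted packets. Offsetting the trigger sequence
 * number by the node's own address staggers these ACKs across the cluster,
 * so all nodes do not acknowledge the same broadcast packet simultaneously.
 */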

/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	tn->bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node->active_links[node->addr & 1],
				     STATE_MSG, 0, 0, 0, 0);
		tn->bcl->stats.sent_acks++;
	}
}

/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 *
 * RCU is locked, no other locks set
 */
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred = 0;
	int pos = 0;
	struct sk_buff *iskb;
	struct sk_buff_head *arrvq, *inputq;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tn->net_id)
		goto exit;

	node = tipc_node_find(net, msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tn->own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_bclink_lock(net);
			bcl->stats.recv_nacks++;
			tn->bclink->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(net, msg);
		}
		tipc_node_put(node);
		goto exit;
	}

	/* Handle in-sequence broadcast message */
	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);
	arrvq = &tn->bclink->arrvq;
	inputq = &tn->bclink->inputq;

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			if (unlikely(!buf && !node->bclink.reasm_buf)) {
				tipc_bclink_unlock(net);
				goto unlock;
			}
			bcl->stats.recv_fragments++;
			if (buf) {
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock(net);
				goto receive;
			}
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */
		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (skb_queue_empty(&node->bclink.deferdq)) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(skb_peek(&node->bclink.deferdq));
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
		buf = __skb_dequeue(&node->bclink.deferdq);
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
					       buf);
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	}

	tipc_bclink_lock(net);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock(net);

unlock:
	tipc_node_unlock(node);
	tipc_node_put(node);
exit:
	kfree_skb(buf);
}

u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}
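
/*
 * tipc_bcbearer_send() below walks the bearer pairs in decreasing priority
 * order, tracking in 'remains' the set of destination nodes not yet covered.
 * A bearer pair is skipped if transmitting on it would not shrink that set,
 * and the loop stops as soon as 'remains' is empty, so each packet is sent
 * on the minimum number of bearers needed to reach every subscribed node.
 */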

/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bclink *bclink = tn->bclink;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		tn->bcl->stats.sent_info++;
		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *bp[2] = {p, s};
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */
		if (!b)
			b = p;
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(net, b->identity,
					 buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	struct tipc_bearer *b;
	int b_index;
	int pri;

	tipc_bclink_lock(net);

	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
		if (!b || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
	rcu_read_unlock();

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	tipc_bclink_unlock(net);
}

static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bclink_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		/* Don't leak bclink_lock on the early error path */
		tipc_bclink_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bclink_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bclink_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

int tipc_bclink_reset_stats(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;

	tipc_bclink_lock(net);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	tipc_bclink_unlock(net);
	return 0;
}

int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;
	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
		return -EINVAL;

	tipc_bclink_lock(net);
	tipc_link_set_queue_limits(bcl, limit);
	tipc_bclink_unlock(net);
	return 0;
}

int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
{
	int err;
	u32 win;
	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

	if (!attrs[TIPC_NLA_LINK_PROP])
		return -EINVAL;

	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
	if (err)
		return err;

	if (!props[TIPC_NLA_PROP_WIN])
		return -EOPNOTSUPP;

	win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);

	return tipc_bclink_set_queue_limits(net, win);
}
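
/*
 * The broadcast link is modelled as an ordinary link owned by a dummy node
 * (bclink->node) and attached to a pseudo-bearer registered in the last
 * bearer_list slot (index MAX_BEARERS), so the generic link and bearer code
 * can treat it like any unicast link while tipc_bcbearer_send() fans each
 * packet out over the real bearers underneath.
 */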

int tipc_bclink_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer;
	struct tipc_bclink *bclink;
	struct tipc_link *bcl;

	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
	if (!bcbearer)
		return -ENOMEM;

	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
	if (!bclink) {
		kfree(bcbearer);
		return -ENOMEM;
	}

	bcl = &bclink->link;
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-broadcast");

	spin_lock_init(&bclink->lock);
	__skb_queue_head_init(&bcl->transmq);
	__skb_queue_head_init(&bcl->backlogq);
	__skb_queue_head_init(&bcl->deferdq);
	skb_queue_head_init(&bcl->wakeupq);
	bcl->snd_nxt = 1;
	spin_lock_init(&bclink->node.lock);
	__skb_queue_head_init(&bclink->arrvq);
	skb_queue_head_init(&bclink->inputq);
	bcl->owner = &bclink->node;
	bcl->owner->net = net;
	bcl->mtu = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	bcl->bearer_id = MAX_BEARERS;
	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
	bcl->state = WORKING_WORKING;
	bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
	msg_set_prevnode(bcl->pmsg, tn->own_addr);
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tn->bcbearer = bcbearer;
	tn->bclink = bclink;
	tn->bcl = bcl;
	return 0;
}

void tipc_bclink_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_link_purge_queues(tn->bcl);
	tipc_bclink_unlock(net);

	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
	synchronize_net();
	kfree(tn->bcbearer);
	kfree(tn->bclink);
}

/**
 * tipc_nmap_add - add a node to a node map
 */
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;		/* word within the bitmap */
	u32 mask = (1 << (n % WSIZE));	/* bit within that word */

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}