/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "monitor.h"
#include "discover.h"
#include "netlink.h"
#include "trace.h"

#define INVALID_NODE_SIG	0x10000
#define NODE_CLEANUP_AFTER	300000

/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_NOTIFY_LINK_UP: notify link is up
 * TIPC_NOTIFY_LINK_DOWN: notify link is down
 */
enum {
	TIPC_NOTIFY_NODE_DOWN	= (1 << 3),
	TIPC_NOTIFY_NODE_UP	= (1 << 4),
	TIPC_NOTIFY_LINK_UP	= (1 << 6),
	TIPC_NOTIFY_LINK_DOWN	= (1 << 7)
};

struct tipc_link_entry {
	struct tipc_link *link;
	spinlock_t lock;	/* per link */
	u32 mtu;
	struct sk_buff_head inputq;
	struct tipc_media_addr maddr;
};

struct tipc_bclink_entry {
	struct tipc_link *link;
	struct sk_buff_head inputq1;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq2;
	struct sk_buff_head namedq;
};

/**
 * struct tipc_node - TIPC node structure
 * @addr: network address of node
 * @kref: reference counter to node object
 * @lock: rwlock governing access to structure
 * @net: the applicable net namespace
 * @hash: links to adjacent nodes in unsorted hash chain
 * @active_links: bearer ids of active links, used as index into links[] array
 * @links: array containing references to all links to node
 * @bc_entry: broadcast link entry
 * @action_flags: bit mask of different types of node actions
 * @list: links to adjacent nodes in sorted list of cluster's nodes
 * @state: connectivity state vs peer node
 * @failover_sent: failover sent or not
 * @sync_point: sequence number where synch/failover is finished
 * @link_cnt: number of links to node
 * @working_links: number of working links to node (both active and standby)
 * @capabilities: bitmap, indicating peer node's functional capabilities
 * @signature: node instance identifier
 * @link_id: local and remote bearer ids of changing link, if any
 * @peer_id: 128-bit id of peer node
 * @publ_list: list of publications
 * @conn_sks: list of connections (sockets) to this node
 * @keepalive_intv: keepalive interval in milliseconds
 * @timer: node's keepalive timer
 * @rcu: rcu struct for tipc_node
 * @delete_at: indicates the time for deleting a down node
 * @peer_net: peer's net namespace, if it resides on the same host
 * @peer_hash_mix: hash mix identifying the peer's net namespace
 */
struct tipc_node {
	u32 addr;
	struct kref kref;
	rwlock_t lock;
	struct net *net;
	struct hlist_node hash;
	int active_links[2];
	struct tipc_link_entry links[MAX_BEARERS];
	struct tipc_bclink_entry bc_entry;
	int action_flags;
	struct list_head list;
	int state;
	bool failover_sent;
	u16 sync_point;
	int link_cnt;
	u16 working_links;
	u16 capabilities;
	u32 signature;
	u32 link_id;
	u8 peer_id[16];
	struct list_head publ_list;
	struct list_head conn_sks;
	unsigned long keepalive_intv;
	struct timer_list timer;
	struct rcu_head rcu;
	unsigned long delete_at;
	struct net *peer_net;
	u32 peer_hash_mix;
};

/* Node FSM states and events:
 */
enum {
	SELF_DOWN_PEER_DOWN    = 0xdd,
	SELF_UP_PEER_UP        = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING    = 0xac,
	SELF_COMING_PEER_UP    = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER       = 0xf0,
	NODE_SYNCHING          = 0xcc
};

enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT   = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT   = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT   = 0xfee,
	NODE_SYNCH_BEGIN_EVT    = 0xcbe,
	NODE_SYNCH_END_EVT      = 0xcee
};
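
/* Editor's note (illustrative, not part of the original file): the state
 * and event values above appear to be mnemonic rather than sequential.
 * Reading each hex digit as a condition gives, e.g.:
 *   0xdd  = self Down / peer Down       0xaa  = self up (Active) / peer up
 *   0xac  = self up / peer Coming       0x1d  = self 1eaving / peer down
 *   0xece = self Establish Contact Evt  0x1ce = self 1ost Contact Evt
 * with the peer-side contact events prefixed by an extra 9, and
 * 0xfbe/0xfee/0xcbe/0xcee for failover/synch begin/end events. This is an
 * inferred reading of the constants, not documented elsewhere in the file.
 */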

static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr);
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
				bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
static void tipc_node_put(struct tipc_node *node);
static bool node_is_up(struct tipc_node *n);
static void tipc_node_delete_from_list(struct tipc_node *node);

struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};

static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
	int bearer_id = n->active_links[sel & 1];

	if (unlikely(bearer_id == INVALID_BEARER_ID))
		return NULL;

	return n->links[bearer_id].link;
}

int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
{
	struct tipc_node *n;
	int bearer_id;
	unsigned int mtu = MAX_MSG_SIZE;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return mtu;

	/* Allow MAX_MSG_SIZE when building a connection oriented message
	 * if the nodes are in the same core network
	 */
	if (n->peer_net && connected) {
		tipc_node_put(n);
		return mtu;
	}

	bearer_id = n->active_links[sel & 1];
	if (likely(bearer_id != INVALID_BEARER_ID))
		mtu = n->links[bearer_id].mtu;
	tipc_node_put(n);
	return mtu;
}

bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
{
	u8 *own_id = tipc_own_id(net);
	struct tipc_node *n;

	if (!own_id)
		return true;

	if (addr == tipc_own_addr(net)) {
		memcpy(id, own_id, TIPC_NODEID_LEN);
		return true;
	}
	n = tipc_node_find(net, addr);
	if (!n)
		return false;

	memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
	tipc_node_put(n);
	return true;
}

u16 tipc_node_get_capabilities(struct net *net, u32 addr)
{
	struct tipc_node *n;
	u16 caps;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return TIPC_NODE_CAPABILITIES;
	caps = n->capabilities;
	tipc_node_put(n);
	return caps;
}

static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *n = container_of(kref, struct tipc_node, kref);

	kfree(n->bc_entry.link);
	kfree_rcu(n, rcu);
}

static void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

static void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node;
	unsigned int thash = tipc_hashfn(addr);

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
		if (node->addr != addr)
			continue;
		if (!kref_get_unless_zero(&node->kref))
			node = NULL;
		break;
	}
	rcu_read_unlock();
	return node;
}
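
/* Editor's sketch (not part of the original file): tipc_node_find() returns
 * a node whose reference count has already been bumped via
 * kref_get_unless_zero(), so every successful lookup must be paired with
 * tipc_node_put(). A minimal usage pattern, for a caller that only needs
 * a field snapshot:
 */
#if 0	/* illustrative only, not compiled */
static u32 example_read_signature(struct net *net, u32 addr)
{
	struct tipc_node *n = tipc_node_find(net, addr);	/* +1 ref */
	u32 sig;

	if (!n)
		return 0;
	sig = n->signature;
	tipc_node_put(n);					/* -1 ref */
	return sig;
}
#endif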

/* tipc_node_find_by_id - locate specified node object by its 128-bit id
 * Note: this function is called only when a discovery request failed
 * to find the node by its 32-bit id, and is not time critical
 */
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		read_lock_bh(&n->lock);
		if (!memcmp(id, n->peer_id, 16) &&
		    kref_get_unless_zero(&n->kref))
			found = true;
		read_unlock_bh(&n->lock);
		if (found)
			break;
	}
	rcu_read_unlock();
	return found ? n : NULL;
}

static void tipc_node_read_lock(struct tipc_node *n)
{
	read_lock_bh(&n->lock);
}

static void tipc_node_read_unlock(struct tipc_node *n)
{
	read_unlock_bh(&n->lock);
}

static void tipc_node_write_lock(struct tipc_node *n)
{
	write_lock_bh(&n->lock);
}

static void tipc_node_write_unlock_fast(struct tipc_node *n)
{
	write_unlock_bh(&n->lock);
}

static void tipc_node_write_unlock(struct tipc_node *n)
{
	struct net *net = n->net;
	u32 addr = 0;
	u32 flags = n->action_flags;
	u32 link_id = 0;
	u32 bearer_id;
	struct list_head *publ_list;

	if (likely(!flags)) {
		write_unlock_bh(&n->lock);
		return;
	}

	addr = n->addr;
	link_id = n->link_id;
	bearer_id = link_id & 0xffff;
	publ_list = &n->publ_list;

	n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
			     TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

	write_unlock_bh(&n->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, addr);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, addr);

	if (flags & TIPC_NOTIFY_LINK_UP) {
		tipc_mon_peer_up(net, addr, bearer_id);
		tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
				     TIPC_NODE_SCOPE, link_id, link_id);
	}
	if (flags & TIPC_NOTIFY_LINK_DOWN) {
		tipc_mon_peer_down(net, addr, bearer_id);
		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
				      addr, link_id);
	}
}
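
/* Editor's note (illustrative, not from the original file): the unlock
 * helper above implements a deferred-notification pattern. Callers set
 * TIPC_NOTIFY_* bits in n->action_flags while holding the write lock;
 * tipc_node_write_unlock() snapshots and clears those bits, drops the
 * lock, and only then calls into the name table, monitor and publication
 * code, keeping those paths outside the node's rwlock.
 * tipc_node_write_unlock_fast() is the variant for callers known not to
 * have set any flags.
 */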

static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
{
	int net_id = tipc_netid(n->net);
	struct tipc_net *tn_peer;
	struct net *tmp;
	u32 hash_chk;

	if (n->peer_net)
		return;

	for_each_net_rcu(tmp) {
		tn_peer = tipc_net(tmp);
		if (!tn_peer)
			continue;
		/* Integrity check: the namespace must match the peer's
		 * net id, node id and hash mix
		 */
		if (tn_peer->net_id != net_id)
			continue;
		if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
			continue;
		hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
		if (hash_mixes ^ hash_chk)
			continue;
		n->peer_net = tmp;
		n->peer_hash_mix = hash_mixes;
		break;
	}
}

static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
					  u8 *peer_id, u16 capabilities,
					  u32 signature, u32 hash_mixes)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n, *temp_node;
	struct tipc_link *l;
	int bearer_id;
	int i;

	spin_lock_bh(&tn->node_list_lock);
	n = tipc_node_find(net, addr);
	if (n) {
		if (n->peer_hash_mix ^ hash_mixes)
			tipc_node_assign_peer_net(n, hash_mixes);
		if (n->capabilities == capabilities)
			goto exit;
		/* Same node may come back with new capabilities */
		tipc_node_write_lock(n);
		n->capabilities = capabilities;
		for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
			l = n->links[bearer_id].link;
			if (l)
				tipc_link_update_caps(l, capabilities);
		}
		tipc_node_write_unlock_fast(n);

		/* Calculate cluster capabilities */
		tn->capabilities = TIPC_NODE_CAPABILITIES;
		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
			tn->capabilities &= temp_node->capabilities;
		}

		goto exit;
	}
	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	n->addr = addr;
	memcpy(&n->peer_id, peer_id, 16);
	n->net = net;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;
	/* Assign the kernel-local peer namespace, if one exists */
	tipc_node_assign_peer_net(n, hash_mixes);
	n->capabilities = capabilities;
	kref_init(&n->kref);
	rwlock_init(&n->lock);
	INIT_HLIST_NODE(&n->hash);
	INIT_LIST_HEAD(&n->list);
	INIT_LIST_HEAD(&n->publ_list);
	INIT_LIST_HEAD(&n->conn_sks);
	skb_queue_head_init(&n->bc_entry.namedq);
	skb_queue_head_init(&n->bc_entry.inputq1);
	__skb_queue_head_init(&n->bc_entry.arrvq);
	skb_queue_head_init(&n->bc_entry.inputq2);
	for (i = 0; i < MAX_BEARERS; i++)
		spin_lock_init(&n->links[i].lock);
	n->state = SELF_DOWN_PEER_LEAVING;
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	n->signature = INVALID_NODE_SIG;
	n->active_links[0] = INVALID_BEARER_ID;
	n->active_links[1] = INVALID_BEARER_ID;
	if (!tipc_link_bc_create(net, tipc_own_addr(net),
				 addr, U16_MAX,
				 tipc_link_window(tipc_bc_sndlink(net)),
				 n->capabilities,
				 &n->bc_entry.inputq1,
				 &n->bc_entry.namedq,
				 tipc_bc_sndlink(net),
				 &n->bc_entry.link)) {
		pr_warn("Broadcast rcv link creation failed, no memory\n");
		if (n->peer_net) {
			n->peer_net = NULL;
			n->peer_hash_mix = 0;
		}
		kfree(n);
		n = NULL;
		goto exit;
	}
	tipc_node_get(n);
	timer_setup(&n->timer, tipc_node_timeout, 0);
	n->keepalive_intv = U32_MAX;
	hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n->list, &temp_node->list);
	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	trace_tipc_node_create(n, true, " ");
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n;
}

static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = tipc_link_tolerance(l);
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	/* Link with lowest tolerance determines timer interval */
	if (intv < n->keepalive_intv)
		n->keepalive_intv = intv;

	/* Ensure link's abort limit corresponds to current tolerance */
	tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}
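
/* Editor's worked example (illustrative, not from the original file):
 * with a link tolerance of 1500 ms, tol / 4 = 375 ms, which is below the
 * 500 ms cap, so the node keepalive interval becomes 375 ms and the abort
 * limit is 1500 / 375 = 4 missed probes. With a tolerance of 30000 ms the
 * interval is capped at 500 ms and the abort limit becomes 60.
 */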

static void tipc_node_delete_from_list(struct tipc_node *node)
{
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	tipc_node_put(node);
}

static void tipc_node_delete(struct tipc_node *node)
{
	trace_tipc_node_delete(node, true, " ");
	tipc_node_delete_from_list(node);

	del_timer_sync(&node->timer);
	tipc_node_put(node);
}

void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
		tipc_node_delete(node);
	spin_unlock_bh(&tn->node_list_lock);
}

void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_add_tail(subscr, &n->publ_list);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_del_init(subscr);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_write_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_write_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_write_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_write_unlock(node);
	tipc_node_put(node);
}

static void tipc_node_clear_links(struct tipc_node *node)
{
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link_entry *le = &node->links[i];

		if (le->link) {
			kfree(le->link);
			le->link = NULL;
			node->link_cnt--;
		}
	}
}

/* tipc_node_cleanup - delete nodes that do not
 * have active links for NODE_CLEANUP_AFTER time
 */
static bool tipc_node_cleanup(struct tipc_node *peer)
{
	struct tipc_node *temp_node;
	struct tipc_net *tn = tipc_net(peer->net);
	bool deleted = false;

	/* If lock held by tipc_node_stop() the node will be deleted anyway */
	if (!spin_trylock_bh(&tn->node_list_lock))
		return false;

	tipc_node_write_lock(peer);

	if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
		tipc_node_clear_links(peer);
		tipc_node_delete_from_list(peer);
		deleted = true;
	}
	tipc_node_write_unlock(peer);

	if (!deleted) {
		spin_unlock_bh(&tn->node_list_lock);
		return deleted;
	}

	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}

	spin_unlock_bh(&tn->node_list_lock);
	return deleted;
}

/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(struct timer_list *t)
{
	struct tipc_node *n = from_timer(n, t, timer);
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int remains = n->link_cnt;
	int bearer_id;
	int rc = 0;

	trace_tipc_node_timeout(n, false, " ");
	if (!node_is_up(n) && tipc_node_cleanup(n)) {
		/* Drop the timer's reference to the node */
		tipc_node_put(n);
		return;
	}

	__skb_queue_head_init(&xmitq);

	/* Start with a large node interval (10 seconds); it will then be
	 * recalculated from the lowest link tolerance
	 */
	tipc_node_read_lock(n);
	n->keepalive_intv = 10000;
	tipc_node_read_unlock(n);
	for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		if (le->link) {
			spin_lock_bh(&le->lock);
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
			spin_unlock_bh(&le->lock);
			remains--;
		}
		tipc_node_read_unlock(n);
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}

/**
 * __tipc_node_link_up - handle addition of link
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
				struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	if (!nl || tipc_link_is_up(nl))
		return;

	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
	if (!tipc_link_is_up(nl))
		return;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = tipc_link_id(nl);

	/* Leave room for tunnel header when returning 'mtu' to users: */
	n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

	pr_debug("Established link <%s> on network plane %c\n",
		 tipc_link_name(nl), tipc_link_plane(nl));
	trace_tipc_node_link_up(n, true, " ");

	/* Ensure that a STATE message goes first */
	tipc_link_build_state_msg(nl, xmitq);

	/* First link? => give it both slots */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
		n->action_flags |= TIPC_NOTIFY_NODE_UP;
		tipc_link_set_active(nl, true);
		tipc_bcast_add_peer(n->net, nl, xmitq);
		return;
	}

	/* Second link => redistribute slots */
	if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
		pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_link_set_active(nl, true);
		tipc_link_set_active(ol, false);
	} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
		tipc_link_set_active(nl, true);
		*slot1 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
	}

	/* Prepare synchronization with first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}
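
/* Editor's worked example (illustrative, not from the original file):
 * active_links[] holds two "slots", indexed by (selector & 1), so traffic
 * can be spread over two equal-priority links. If link A (prio 10) is up
 * alone, both slots point at A. If link B then comes up with prio 20, B
 * takes both slots and A becomes standby; with equal priorities, A keeps
 * slot 0 and B takes slot 1, sharing the load per-selector.
 */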

/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	struct tipc_media_addr *maddr;

	tipc_node_write_lock(n);
	__tipc_node_link_up(n, bearer_id, xmitq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr);
	tipc_node_write_unlock(n);
}

/**
 * tipc_node_link_failover() - start failover in case of "half-failover"
 *
 * This function is only called in a very special situation where link
 * failover may already have been started on the peer node, but not on this
 * node. This can happen when e.g.:
 * 1. Both links <1A-2A>, <1B-2B> are down
 * 2. Link endpoint 2A comes up, but 1A is still down (e.g. due to network
 *    disturbance, wrong session, etc.)
 * 3. Link <1B-2B> comes up
 * 4. Link endpoint 2A goes down (e.g. due to link tolerance timeout)
 * 5. Node 2 starts failover onto link <1B-2B>
 *
 * ==> Node 1 never starts link/node failover on its own!
 *
 * @n: tipc node structure
 * @l: link peer endpoint that is failing over (can be NULL)
 * @tnl: tunnel link
 * @xmitq: queue for messages to be xmited on tnl link later
 */
static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
				    struct tipc_link *tnl,
				    struct sk_buff_head *xmitq)
{
	/* Avoid a "self-failover" that can never end */
	if (!tipc_link_is_up(tnl))
		return;

	/* Don't rush, the failed link may still be in the process of resetting */
	if (l && !tipc_link_is_reset(l))
		return;

	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);

	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_failover_prepare(l, tnl, xmitq);

	if (l)
		tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
}
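
/* Editor's worked example (illustrative, not from the original file):
 * sync_point is u16, so the addition above wraps mod 65536. With
 * rcv_nxt = 1000, sync_point = 1000 + 32766 = 33766; with rcv_nxt = 60000,
 * sync_point = (60000 + 32766) mod 65536 = 27230. Offsetting by almost
 * half the sequence space places the synch point "far ahead" under the
 * mod-2^16 less()/more() comparisons used elsewhere in this file.
 */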

/**
 * __tipc_node_link_down - handle loss of link
 */
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr)
{
	struct tipc_link_entry *le = &n->links[*bearer_id];
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0, prio;
	struct tipc_link *l, *_l, *tnl;

	l = n->links[*bearer_id].link;
	if (!l || tipc_link_is_reset(l))
		return;

	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = tipc_link_id(l);

	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 tipc_link_name(l), tipc_link_plane(l));

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		prio = tipc_link_prio(_l);
		if (prio < highest)
			continue;
		if (prio > highest) {
			highest = prio;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	if (!node_is_up(n)) {
		if (tipc_link_peer_is_down(l))
			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		tipc_link_reset(l);
		tipc_link_build_reset_msg(l, xmitq);
		*maddr = &n->links[*bearer_id].maddr;
		node_lost_contact(n, &le->inputq);
		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
		return;
	}
	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);

	/* There is still a working link => initiate failover */
	*bearer_id = n->active_links[0];
	tnl = n->links[*bearer_id].link;
	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
	tipc_link_reset(l);
	tipc_link_fsm_evt(l, LINK_RESET_EVT);
	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
	*maddr = &n->links[*bearer_id].maddr;
}

static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct tipc_media_addr *maddr = NULL;
	struct tipc_link *l = le->link;
	int old_bearer_id = bearer_id;
	struct sk_buff_head xmitq;

	if (!l)
		return;

	__skb_queue_head_init(&xmitq);

	tipc_node_write_lock(n);
	if (!tipc_link_is_establishing(l)) {
		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
	} else {
		/* Defuse pending tipc_node_link_up() */
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
	}
	if (delete) {
		kfree(l);
		le->link = NULL;
		n->link_cnt--;
	}
	trace_tipc_node_link_down(n, true, "node link down or deleted!");
	tipc_node_write_unlock(n);
	if (delete)
		tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
	tipc_sk_rcv(n->net, &le->inputq);
}

static bool node_is_up(struct tipc_node *n)
{
	return n->active_links[0] != INVALID_BEARER_ID;
}

bool tipc_node_is_up(struct net *net, u32 addr)
{
	struct tipc_node *n;
	bool retval = false;

	if (in_own_node(net, addr))
		return true;

	n = tipc_node_find(net, addr);
	if (!n)
		return false;
	retval = node_is_up(n);
	tipc_node_put(n);
	return retval;
}

static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	addr ^= tipc_net(net)->random;
	while ((n = tipc_node_find(net, addr))) {
		tipc_node_put(n);
		addr++;
	}
	return addr;
}

/* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
 * Returns suggested address if any, otherwise 0
 */
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;

	/* Suggest new address if some other peer is using this one */
	n = tipc_node_find(net, addr);
	if (n) {
		if (!memcmp(n->peer_id, id, NODE_ID_LEN))
			addr = 0;
		tipc_node_put(n);
		if (!addr)
			return 0;
		return tipc_node_suggest_addr(net, addr);
	}

	/* Suggest previously used address if peer is known */
	n = tipc_node_find_by_id(net, id);
	if (n) {
		addr = n->addr;
		tipc_node_put(n);
		return addr;
	}

	/* Even this node may be in conflict */
	if (tn->trial_addr == addr)
		return tipc_node_suggest_addr(net, addr);

	return 0;
}

void tipc_node_check_dest(struct net *net, u32 addr,
			  u8 *peer_id, struct tipc_bearer *b,
			  u16 capabilities, u32 signature, u32 hash_mixes,
			  struct tipc_media_addr *maddr,
			  bool *respond, bool *dupl_addr)
{
	struct tipc_node *n;
	struct tipc_link *l;
	struct tipc_link_entry *le;
	bool addr_match = false;
	bool sign_match = false;
	bool link_up = false;
	bool accept_addr = false;
	bool reset = true;
	char *if_name;
	unsigned long intv;
	u16 session;

	*dupl_addr = false;
	*respond = false;

	n = tipc_node_create(net, addr, peer_id, capabilities, signature,
			     hash_mixes);
	if (!n)
		return;

	tipc_node_write_lock(n);

	le = &n->links[b->identity];

	/* Prepare to validate requesting node's signature and media address */
	l = le->link;
	link_up = l && tipc_link_is_up(l);
	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
	sign_match = (signature == n->signature);

	/* These three flags give us eight permutations: */
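
	/* Editor's summary of the chain below (illustrative, not from the
	 * original file):
	 *
	 *  sign  addr  link
	 *  match match  up   action
	 *   yes   yes   yes  all fine, do nothing
	 *   yes   yes   no   respond; link will come up in due time
	 *   yes   no    yes  ignore (dupl_addr) until link goes down
	 *   yes   no    no   accept new address, respond
	 *   no    yes   yes  peer rebooted, accept new signature
	 *   no    yes   no   peer rebooted, accept signature, respond
	 *   no    no    yes  ignore (dupl_addr) until link goes down
	 *   no    no    no   accept signature and address, respond
	 */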

	if (sign_match && addr_match && link_up) {
		/* All is fine. Do nothing. */
		reset = false;
	} else if (sign_match && addr_match && !link_up) {
		/* Respond. The link will come up in due time */
		*respond = true;
	} else if (sign_match && !addr_match && link_up) {
		/* Peer has changed i/f address without rebooting.
		 * If so, the link will reset soon, and the next
		 * discovery will be accepted. So we can ignore it.
		 * It may also be a cloned or malicious peer having
		 * chosen the same node address and signature as an
		 * existing one.
		 * Ignore requests until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (sign_match && !addr_match && !link_up) {
		/* Peer link has changed i/f address without rebooting.
		 * It may also be a cloned or malicious peer; we can't
		 * distinguish between the two.
		 * The signature is correct, so we must accept.
		 */
		accept_addr = true;
		*respond = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Peer node rebooted. Two possibilities:
		 *  - Delayed re-discovery; this link endpoint has already
		 *    reset and re-established contact with the peer, before
		 *    receiving a discovery message from that node.
		 *    (The peer happened to receive one from this node first).
		 *  - The peer came back so fast that our side has not
		 *    discovered it yet. Probing from this side will soon
		 *    reset the link, since there can be no working link
		 *    endpoint at the peer end, and the link will re-establish.
		 * Accept the signature, since it comes from a known peer.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/* The peer node has rebooted.
		 * Accept signature, since it is a known peer.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Peer rebooted with new address, or a new/duplicate peer.
		 * Ignore until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with new address, or it is a new peer.
		 * Accept signature and address.
		 */
		n->signature = signature;
		accept_addr = true;
		*respond = true;
	}

	if (!accept_addr)
		goto exit;

	/* Now create new link if not already existing */
	if (!l) {
		if (n->link_cnt == 2)
			goto exit;

		if_name = strchr(b->name, ':') + 1;
		get_random_bytes(&session, sizeof(u16));
		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
				      b->net_plane, b->mtu, b->priority,
				      b->window, session,
				      tipc_own_addr(net), addr, peer_id,
				      n->capabilities,
				      tipc_bc_sndlink(n->net), n->bc_entry.link,
				      &le->inputq,
				      &n->bc_entry.namedq, &l)) {
			*respond = false;
			goto exit;
		}
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		if (n->state == NODE_FAILINGOVER)
			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
		le->link = l;
		n->link_cnt++;
		tipc_node_calculate_timer(n, l);
		if (n->link_cnt == 1) {
			intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
			if (!mod_timer(&n->timer, intv))
				tipc_node_get(n);
		}
	}
	memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
	tipc_node_write_unlock(n);
	if (reset && l && !tipc_link_is_reset(l))
		tipc_node_link_down(n, b->identity, false);
	tipc_node_put(n);
}

void tipc_node_delete_links(struct net *net, int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_link_down(n, bearer_id, true);
	}
	rcu_read_unlock();
}

static void tipc_node_reset_links(struct tipc_node *n)
{
	int i;

	pr_warn("Resetting all links to %x\n", n->addr);

	trace_tipc_node_reset_links(n, true, " ");
	for (i = 0; i < MAX_BEARERS; i++) {
		tipc_node_link_down(n, i, false);
	}
}

/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
	n->state = state;
	return;

illegal_evt:
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
}

static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %x\n", n->addr);
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	trace_tipc_node_lost_contact(n, true, " ");

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;

	if (n->peer_net) {
		n->peer_net = NULL;
		n->peer_hash_mix = 0;
	}
	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}

/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of the output buffer
 *
 * Returns 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_read_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		strncpy(linkname, tipc_link_name(link), len);
		err = 0;
	}
	tipc_node_read_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	struct sk_buff_head inputq;

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (msg_connected(hdr) || msg_named(hdr)) {
			tipc_loopback_trace(peer_net, list);
			spin_lock_init(&list->lock);
			tipc_sk_rcv(peer_net, list);
			return;
		}
		if (msg_mcast(hdr)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
			return;
		}
		return;
	case MSG_FRAGMENTER:
		if (tipc_msg_assemble(list)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
		}
		return;
	case GROUP_PROTOCOL:
	case CONN_MANAGER:
		tipc_loopback_trace(peer_net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(peer_net, list);
		return;
	case LINK_PROTOCOL:
	case NAME_DISTRIBUTOR:
	case TUNNEL_PROTOCOL:
	case BCAST_PROTOCOL:
		return;
	default:
		return;
	}
}

/**
 * tipc_node_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain.
 * Returns 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUFS
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link_entry *le = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	bool node_up = false;
	int bearer_id;
	int rc;

	if (in_own_node(net, dnode)) {
		tipc_loopback_trace(net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(net, list);
		return 0;
	}

	n = tipc_node_find(net, dnode);
	if (unlikely(!n)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	tipc_node_read_lock(n);
	node_up = node_is_up(n);
	if (node_up && n->peer_net && check_net(n->peer_net)) {
		/* xmit inner linux container */
		tipc_lxc_xmit(n->peer_net, list);
		if (likely(skb_queue_empty(list))) {
			tipc_node_read_unlock(n);
			tipc_node_put(n);
			return 0;
		}
	}

	bearer_id = n->active_links[selector & 1];
	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
		tipc_node_read_unlock(n);
		tipc_node_put(n);
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	__skb_queue_head_init(&xmitq);
	le = &n->links[bearer_id];
	spin_lock_bh(&le->lock);
	rc = tipc_link_xmit(le->link, list, &xmitq);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(n);

	if (unlikely(rc == -ENOBUFS))
		tipc_node_link_down(n, bearer_id, false);
	else
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	tipc_node_put(n);

	return rc;
}
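
/* Editor's sketch (not part of the original file): a minimal sender built
 * on tipc_node_xmit(). The buffer is assumed to already carry a complete
 * TIPC message header, as real callers (e.g. the socket layer) ensure.
 */
#if 0	/* illustrative only, not compiled */
static int example_send_one(struct net *net, struct sk_buff *skb,
			    u32 dnode, u32 own_port)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	__skb_queue_tail(&list, skb);
	/* The selector picks one of the two active link slots */
	return tipc_node_xmit(net, &list, dnode, own_port);
}
#endif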

/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	__skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	tipc_node_xmit(net, &head, dnode, selector);
	return 0;
}

/* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
 */
int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	u32 selector, dnode;

	while ((skb = __skb_dequeue(xmitq))) {
		selector = msg_origport(buf_msg(skb));
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, selector);
	}
	return 0;
}
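
/* Editor's note (illustrative, not from the original file): using the
 * originating port as selector above means all messages from one port hash
 * to the same active_links[] slot, so per-source ordering is preserved even
 * when two equal-priority links share the load.
 */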

void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
{
	struct sk_buff *txskb;
	struct tipc_node *n;
	u32 dst;

	rcu_read_lock();
	list_for_each_entry_rcu(n, tipc_nodes(net), list) {
		dst = n->addr;
		if (in_own_node(net, dst))
			continue;
		if (!node_is_up(n))
			continue;
		txskb = pskb_copy(skb, GFP_ATOMIC);
		if (!txskb)
			break;
		msg_set_destnode(buf_msg(txskb), dst);
		tipc_node_xmit_skb(net, txskb, dst, 0);
	}
	rcu_read_unlock();

	kfree_skb(skb);
}

static void tipc_node_mcast_rcv(struct tipc_node *n)
{
	struct tipc_bclink_entry *be = &n->bc_entry;

	/* 'arrvq' is under inputq2's lock protection */
	spin_lock_bh(&be->inputq2.lock);
	spin_lock_bh(&be->inputq1.lock);
	skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
	spin_unlock_bh(&be->inputq1.lock);
	spin_unlock_bh(&be->inputq2.lock);
	tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
}

static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_link *ucl;
	int rc;

	rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr);

	if (rc & TIPC_LINK_DOWN_EVT) {
		tipc_node_reset_links(n);
		return;
	}

	if (!(rc & TIPC_LINK_SND_STATE))
		return;

	/* If probe message, a STATE response will be sent anyway */
	if (msg_probe(hdr))
		return;

	/* Produce a STATE message carrying broadcast NACK */
	tipc_node_read_lock(n);
	ucl = n->links[bearer_id].link;
	if (ucl)
		tipc_link_build_state_msg(ucl, xmitq);
	tipc_node_read_unlock(n);
}

/**
 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer_id: id of bearer message arrived on
 *
 * Invoked with no locks held.
 */
static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
{
	int rc;
	struct sk_buff_head xmitq;
	struct tipc_bclink_entry *be;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	u32 dnode = msg_destnode(hdr);
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	/* If NACK for other node, let rcv link for that node peek into it */
	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
		n = tipc_node_find(net, dnode);
	else
		n = tipc_node_find(net, msg_prevnode(hdr));
	if (!n) {
		kfree_skb(skb);
		return;
	}
	be = &n->bc_entry;
	le = &n->links[bearer_id];

	rc = tipc_bcast_rcv(net, be->link, skb);

	/* Broadcast ACKs are sent on a unicast link */
	if (rc & TIPC_LINK_SND_STATE) {
		tipc_node_read_lock(n);
		tipc_link_build_state_msg(le->link, &xmitq);
		tipc_node_read_unlock(n);
	}

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	if (!skb_queue_empty(&be->inputq1))
		tipc_node_mcast_rcv(n);

	/* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
	if (!skb_queue_empty(&n->bc_entry.namedq))
		tipc_named_rcv(net, &n->bc_entry.namedq);

	/* If reassembly or retransmission failure => reset all links to peer */
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_node_reset_links(n);

	tipc_node_put(n);
}

/**
 * tipc_node_check_state - check and if necessary update node state
 * @n: target tipc_node structure
 * @skb: TIPC packet
 * @bearer_id: identity of bearer delivering the packet
 * @xmitq: queue for messages to be transmitted
 * Returns true if state and msg are ok, otherwise false
 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int mtyp = msg_type(hdr);
	u16 oseqno = msg_seqno(hdr);
	u16 exp_pkts = msg_msgcnt(hdr);
	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
	int state = n->state;
	struct tipc_link *l, *tnl, *pl = NULL;
	struct tipc_media_addr *maddr;
	int pb_id;

	if (trace_tipc_node_check_state_enabled()) {
		trace_tipc_skb_dump(skb, false, "skb for node state check");
		trace_tipc_node_check_state(n, true, " ");
	}
	l = n->links[bearer_id].link;
	if (!l)
		return false;
	rcv_nxt = tipc_link_rcv_nxt(l);

	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
		return true;

	/* Find parallel link, if any */
	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
		if ((pb_id != bearer_id) && n->links[pb_id].link) {
			pl = n->links[pb_id].link;
			break;
		}
	}

	if (!tipc_link_validate_msg(l, hdr)) {
		trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
		return false;
	}

	/* Check and update node accessibility if applicable */
	if (state == SELF_UP_PEER_COMING) {
		if (!tipc_link_is_up(l))
			return true;
		if (!msg_peer_link_is_up(hdr))
			return true;
		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
	}

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_node_is_up(hdr))
			return false;
		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		return true;
	}

	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	/* Ignore duplicate packets */
	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
		return true;

	/* Initiate or update failover mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
		syncpt = oseqno + exp_pkts - 1;
		if (pl && !tipc_link_is_reset(pl)) {
			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
			trace_tipc_node_link_down(n, true,
						  "node link down <- failover!");
			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
							tipc_link_inputq(l));
		}

		/* If parallel link was already down, and this happened before
		 * the tunnel link came up, node failover was never started.
		 * Ensure that a FAILOVER_MSG is sent to get peer out of
		 * NODE_FAILINGOVER state, also this node must accept
		 * TUNNEL_MSGs from peer.
		 */
		if (n->state != NODE_FAILINGOVER)
			tipc_node_link_failover(n, pl, l, xmitq);

		/* If pkts arrive out of order, use lowest calculated syncpt */
		if (less(syncpt, n->sync_point))
			n->sync_point = syncpt;
	}

	/* Open parallel link when tunnel link reaches synch point */
	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
		if (!more(rcv_nxt, n->sync_point))
			return true;
		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
		if (pl)
			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
		return true;
	}

	/* No synching needed if only one link */
	if (!pl || !tipc_link_is_up(pl))
		return true;

	/* Initiate synch mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
		if (n->capabilities & TIPC_TUNNEL_ENHANCED)
			syncpt = msg_syncpt(hdr);
		else
			syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
		if (!tipc_link_is_up(l))
			__tipc_node_link_up(n, bearer_id, xmitq);
		if (n->state == SELF_UP_PEER_UP) {
			n->sync_point = syncpt;
			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
		}
	}

	/* Open tunnel link when parallel link reaches synch point */
	if (n->state == NODE_SYNCHING) {
		if (tipc_link_is_synching(l)) {
			tnl = l;
		} else {
			tnl = pl;
			pl = l;
		}
		inputq_len = skb_queue_len(tipc_link_inputq(pl));
		dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
		if (more(dlv_nxt, n->sync_point)) {
			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
			return true;
		}
		if (l == pl)
			return true;
		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
			return true;
		if (usr == LINK_PROTOCOL)
			return true;
		return false;
	}
	return true;
}
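
/* Editor's worked example (illustrative, not from the original file):
 * dlv_nxt above is the sequence number of the next packet actually
 * *delivered* to users on the parallel link: if rcv_nxt = 105 and four
 * packets still sit undelivered in the link's input queue, then
 * dlv_nxt = 105 - 4 = 101. Only when dlv_nxt passes sync_point (under the
 * mod-2^16 more() comparison) is it safe to end the synch phase and open
 * the tunnel link.
 */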
1921 */ 1922 void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) 1923 { 1924 struct sk_buff_head xmitq; 1925 struct tipc_node *n; 1926 struct tipc_msg *hdr; 1927 int bearer_id = b->identity; 1928 struct tipc_link_entry *le; 1929 u32 self = tipc_own_addr(net); 1930 int usr, rc = 0; 1931 u16 bc_ack; 1932 1933 __skb_queue_head_init(&xmitq); 1934 1935 /* Ensure message is well-formed before touching the header */ 1936 TIPC_SKB_CB(skb)->validated = false; 1937 if (unlikely(!tipc_msg_validate(&skb))) 1938 goto discard; 1939 hdr = buf_msg(skb); 1940 usr = msg_user(hdr); 1941 bc_ack = msg_bcast_ack(hdr); 1942 1943 /* Handle arrival of discovery or broadcast packet */ 1944 if (unlikely(msg_non_seq(hdr))) { 1945 if (unlikely(usr == LINK_CONFIG)) 1946 return tipc_disc_rcv(net, skb, b); 1947 else 1948 return tipc_node_bc_rcv(net, skb, bearer_id); 1949 } 1950 1951 /* Discard unicast link messages destined for another node */ 1952 if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self))) 1953 goto discard; 1954 1955 /* Locate neighboring node that sent packet */ 1956 n = tipc_node_find(net, msg_prevnode(hdr)); 1957 if (unlikely(!n)) 1958 goto discard; 1959 le = &n->links[bearer_id]; 1960 1961 /* Ensure broadcast reception is in synch with peer's send state */ 1962 if (unlikely(usr == LINK_PROTOCOL)) 1963 tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq); 1964 else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) 1965 tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr); 1966 1967 /* Receive packet directly if conditions permit */ 1968 tipc_node_read_lock(n); 1969 if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) { 1970 spin_lock_bh(&le->lock); 1971 if (le->link) { 1972 rc = tipc_link_rcv(le->link, skb, &xmitq); 1973 skb = NULL; 1974 } 1975 spin_unlock_bh(&le->lock); 1976 } 1977 tipc_node_read_unlock(n); 1978 1979 /* Check/update node state before receiving */ 1980 if (unlikely(skb)) { 1981 if (unlikely(skb_linearize(skb))) 1982 goto discard; 1983 tipc_node_write_lock(n); 1984 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) { 1985 if (le->link) { 1986 rc = tipc_link_rcv(le->link, skb, &xmitq); 1987 skb = NULL; 1988 } 1989 } 1990 tipc_node_write_unlock(n); 1991 } 1992 1993 if (unlikely(rc & TIPC_LINK_UP_EVT)) 1994 tipc_node_link_up(n, bearer_id, &xmitq); 1995 1996 if (unlikely(rc & TIPC_LINK_DOWN_EVT)) 1997 tipc_node_link_down(n, bearer_id, false); 1998 1999 if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) 2000 tipc_named_rcv(net, &n->bc_entry.namedq); 2001 2002 if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) 2003 tipc_node_mcast_rcv(n); 2004 2005 if (!skb_queue_empty(&le->inputq)) 2006 tipc_sk_rcv(net, &le->inputq); 2007 2008 if (!skb_queue_empty(&xmitq)) 2009 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr); 2010 2011 tipc_node_put(n); 2012 discard: 2013 kfree_skb(skb); 2014 } 2015 2016 void tipc_node_apply_property(struct net *net, struct tipc_bearer *b, 2017 int prop) 2018 { 2019 struct tipc_net *tn = tipc_net(net); 2020 int bearer_id = b->identity; 2021 struct sk_buff_head xmitq; 2022 struct tipc_link_entry *e; 2023 struct tipc_node *n; 2024 2025 __skb_queue_head_init(&xmitq); 2026 2027 rcu_read_lock(); 2028 2029 list_for_each_entry_rcu(n, &tn->node_list, list) { 2030 tipc_node_write_lock(n); 2031 e = &n->links[bearer_id]; 2032 if (e->link) { 2033 if (prop == TIPC_NLA_PROP_TOL) 2034 tipc_link_set_tolerance(e->link, b->tolerance, 2035 &xmitq); 2036 else if (prop == TIPC_NLA_PROP_MTU) 2037 tipc_link_set_mtu(e->link, b->mtu); 2038 } 
int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
	struct tipc_node *peer, *temp_node;
	u32 addr;
	int err;

	/* We identify the peer by its net */
	if (!info->attrs[TIPC_NLA_NET])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
					  info->attrs[TIPC_NLA_NET],
					  tipc_nl_net_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_NET_ADDR])
		return -EINVAL;

	addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);

	if (in_own_node(net, addr))
		return -ENOTSUPP;

	spin_lock_bh(&tn->node_list_lock);
	peer = tipc_node_find(net, addr);
	if (!peer) {
		spin_unlock_bh(&tn->node_list_lock);
		return -ENXIO;
	}

	tipc_node_write_lock(peer);
	if (peer->state != SELF_DOWN_PEER_DOWN &&
	    peer->state != SELF_DOWN_PEER_LEAVING) {
		tipc_node_write_unlock(peer);
		err = -EBUSY;
		goto err_out;
	}

	tipc_node_clear_links(peer);
	tipc_node_write_unlock(peer);
	tipc_node_delete(peer);

	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	err = 0;
err_out:
	tipc_node_put(peer);
	spin_unlock_bh(&tn->node_list_lock);

	return err;
}

int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent(),
			 * so setting prev_seq here will cause the consistency
			 * check to fail in the netlink callback handler,
			 * resulting in the NLMSG_DONE message having the
			 * NLM_F_DUMP_INTR flag set if the node state changed
			 * while we released the lock.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		if (last_addr) {
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_read_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			last_addr = node->addr;
			tipc_node_read_unlock(node);
			goto out;
		}

		tipc_node_read_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}
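/* Note on dump resumption: netlink dump callbacks like the one above
 * are re-invoked until they return 0, and cb->args[] is the only state
 * preserved between invocations. The convention used here is
 * args[0] = done flag, args[1] = address of the node that no longer
 * fit in the previous skb. A sketch of the flow across invocations:
 *
 *	1st call: fills skb, runs out of room at node X
 *	          -> args[1] = X->addr, returns skb->len
 *	2nd call: skips list entries until it finds X, resumes there;
 *	          on completion sets args[0] = 1
 *	3rd call: sees args[0] != 0, returns 0 -> NLMSG_DONE
 */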
/* tipc_node_find_by_name - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_node_find_by_name(struct net *net,
						const char *link_name,
						unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l;
	struct tipc_node *n;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_read_lock(n);
		for (i = 0; i < MAX_BEARERS; i++) {
			l = n->links[i].link;
			if (l && !strcmp(tipc_link_name(l), link_name)) {
				*bearer_id = i;
				found_node = n;
				break;
			}
		}
		tipc_node_read_unlock(n);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}

int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct sk_buff_head xmitq;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	__skb_queue_head_init(&xmitq);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_node_find_by_name(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_read_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			tipc_link_set_tolerance(link, tol, &xmitq);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			tipc_link_set_prio(link, prio, &xmitq);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_read_unlock(node);
	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr);
	return res;
}
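/* The handler above accepts a nested attribute layout along these
 * lines (a sketch; tipc_nl_link_policy is the authoritative set, and
 * the link name shown is purely illustrative):
 *
 *	TIPC_NLA_LINK
 *	  TIPC_NLA_LINK_NAME  "1.1.1:eth0-1.1.2:eth0"
 *	  TIPC_NLA_LINK_PROP
 *	    TIPC_NLA_PROP_TOL / _PRIO / _WIN  (u32)
 *
 * Note the asymmetry: tolerance and priority updates queue protocol
 * messages on xmitq so the peer learns the new value, whereas the
 * window property only resizes local send queue limits and needs no
 * message exchange.
 */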
int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto err_free;
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_node_find_by_name(net, name, &bearer_id);
		if (!node) {
			err = -EINVAL;
			goto err_free;
		}

		tipc_node_read_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_read_unlock(node);
			err = -EINVAL;
			goto err_free;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_read_unlock(node);
		if (err)
			goto err_free;
	}

	return genlmsg_reply(msg.skb, info);

err_free:
	nlmsg_free(msg.skb);
	return err;
}

int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_link_entry *le;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats(net);
		if (err)
			return err;
		return 0;
	}

	node = tipc_node_find_by_name(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	le = &node->links[bearer_id];
	tipc_node_read_lock(node);
	spin_lock_bh(&le->lock);
	link = node->links[bearer_id].link;
	if (!link) {
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(node);
		return -EINVAL;
	}
	tipc_link_reset_stats(link);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(node);
	return 0;
}

/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}
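/* __tipc_nl_add_node_links() is resumable in the same way as the node
 * dump earlier in this file: *prev_link remembers the bearer index at
 * which the last message stopped fitting, so the next dump pass
 * re-enters the loop at that index instead of re-emitting links that
 * were already sent. Only once all MAX_BEARERS slots of a node have
 * been walked is *prev_link reset to 0 for the next node.
 */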
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent(),
			 * so setting prev_seq here will cause the consistency
			 * check to fail in the netlink callback handler,
			 * resulting in the last NLMSG_DONE message having the
			 * NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}

int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
	struct net *net = sock_net(skb->sk);
	int err;

	if (!info->attrs[TIPC_NLA_MON])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
					  info->attrs[TIPC_NLA_MON],
					  tipc_nl_monitor_policy,
					  info->extack);
	if (err)
		return err;

	if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
		u32 val;

		val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
		err = tipc_nl_monitor_set_threshold(net, val);
		if (err)
			return err;
	}

	return 0;
}

static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
{
	struct nlattr *attrs;
	void *hdr;
	u32 val;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  0, TIPC_NL_MON_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
	if (!attrs)
		goto msg_full;

	val = tipc_nl_monitor_get_threshold(net);

	if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_nl_msg msg;
	int err;

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	err = __tipc_nl_add_monitor_prop(net, &msg);
	if (err) {
		nlmsg_free(msg.skb);
		return err;
	}

	return genlmsg_reply(msg.skb, info);
}
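/* Sketch of the reply produced by the two functions above, as seen by
 * a userspace dumper (nesting taken from the code; the value shown is
 * illustrative only):
 *
 *	TIPC_NL_MON_GET
 *	  TIPC_NLA_MON
 *	    TIPC_NLA_MON_ACTIVATION_THRESHOLD = 32
 *
 * The threshold is the cluster size at which neighbor supervision
 * switches from every node probing all peers directly to the
 * overlapping-ring monitoring scheme implemented in monitor.c.
 */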
int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_bearer = cb->args[0];
	struct tipc_nl_msg msg;
	int bearer_id;
	int err;

	if (prev_bearer == MAX_BEARERS)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
		err = __tipc_nl_add_monitor(net, &msg, bearer_id);
		if (err)
			break;
	}
	rtnl_unlock();
	cb->args[0] = bearer_id;

	return skb->len;
}

int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_node = cb->args[1];
	u32 bearer_id = cb->args[2];
	int done = cb->args[0];
	struct tipc_nl_msg msg;
	int err;

	if (!prev_node) {
		struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
		struct nlattr *mon[TIPC_NLA_MON_MAX + 1];

		if (!attrs[TIPC_NLA_MON])
			return -EINVAL;

		err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
						  attrs[TIPC_NLA_MON],
						  tipc_nl_monitor_policy,
						  NULL);
		if (err)
			return err;

		if (!mon[TIPC_NLA_MON_REF])
			return -EINVAL;

		bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);

		if (bearer_id >= MAX_BEARERS)
			return -EINVAL;
	}

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
	if (!err)
		done = 1;

	rtnl_unlock();
	cb->args[0] = done;
	cb->args[1] = prev_node;
	cb->args[2] = bearer_id;

	return skb->len;
}

u32 tipc_node_get_addr(struct tipc_node *node)
{
	return (node) ? node->addr : 0;
}
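/* Sketch of how the dump helper below is meant to be consumed; the
 * intended callers are the trace points declared in trace.h, but a
 * plain pr_info() works just as well for illustration. The caller
 * provides a buffer of at least NODE_LMIN bytes (NODE_LMAX when
 * 'more' is true) and gets back the number of characters written:
 *
 *	char buf[NODE_LMAX];
 *
 *	tipc_node_dump(n, true, buf);
 *	pr_info("%s", buf);
 */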
/**
 * tipc_node_dump - dump TIPC node data
 * @n: tipc node to be dumped
 * @more: dump more?
 *        - false: dump only tipc node data
 *        - true: dump node link data as well
 * @buf: buffer where the formatted dump data is returned
 */
int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
{
	int i = 0;
	size_t sz = (more) ? NODE_LMAX : NODE_LMIN;

	if (!n) {
		i += scnprintf(buf, sz, "node data: (null)\n");
		return i;
	}

	i += scnprintf(buf, sz, "node data: %x", n->addr);
	i += scnprintf(buf + i, sz - i, " %x", n->state);
	i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
	i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
	i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
	i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
	i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
	i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
	i += scnprintf(buf + i, sz - i, " %u", n->working_links);
	i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
	i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);

	if (!more)
		return i;

	i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
	i += scnprintf(buf + i, sz - i, " media: ");
	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
	i += scnprintf(buf + i, sz - i, "\n");
	i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
	i += scnprintf(buf + i, sz - i, " inputq: ");
	i += tipc_list_dump(&n->links[0].inputq, false, buf + i);

	i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
	i += scnprintf(buf + i, sz - i, " media: ");
	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
	i += scnprintf(buf + i, sz - i, "\n");
	i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
	i += scnprintf(buf + i, sz - i, " inputq: ");
	i += tipc_list_dump(&n->links[1].inputq, false, buf + i);

	i += scnprintf(buf + i, sz - i, "bclink:\n ");
	i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);

	return i;
}

void tipc_node_pre_cleanup_net(struct net *exit_net)
{
	struct tipc_node *n;
	struct tipc_net *tn;
	struct net *tmp;

	rcu_read_lock();
	for_each_net_rcu(tmp) {
		if (tmp == exit_net)
			continue;
		tn = tipc_net(tmp);
		if (!tn)
			continue;
		spin_lock_bh(&tn->node_list_lock);
		list_for_each_entry_rcu(n, &tn->node_list, list) {
			if (!n->peer_net)
				continue;
			if (n->peer_net != exit_net)
				continue;
			tipc_node_write_lock(n);
			n->peer_net = NULL;
			n->peer_hash_mix = 0;
			tipc_node_write_unlock_fast(n);
			break;
		}
		spin_unlock_bh(&tn->node_list_lock);
	}
	rcu_read_unlock();
}
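/* tipc_node_pre_cleanup_net() must run before the memory of exit_net
 * is torn down: nodes in *other* namespaces may still hold a bare
 * pointer to exit_net in n->peer_net, and clearing it here prevents a
 * use-after-free once exit_net is gone. A sketch of the intended call
 * site in the namespace exit path (assumed; see core.c for the actual
 * pernet operations):
 *
 *	static void __net_exit tipc_exit_net(struct net *net)
 *	{
 *		tipc_node_pre_cleanup_net(net);
 *		...
 *	}
 */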