1 /* 2 * net/tipc/node.c: TIPC node management routines 3 * 4 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB 5 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
35 */ 36 37 #include "core.h" 38 #include "link.h" 39 #include "node.h" 40 #include "name_distr.h" 41 #include "socket.h" 42 #include "bcast.h" 43 #include "monitor.h" 44 #include "discover.h" 45 #include "netlink.h" 46 #include "trace.h" 47 48 #define INVALID_NODE_SIG 0x10000 49 #define NODE_CLEANUP_AFTER 300000 50 51 /* Flags used to take different actions according to flag type 52 * TIPC_NOTIFY_NODE_DOWN: notify node is down 53 * TIPC_NOTIFY_NODE_UP: notify node is up 54 * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type 55 */ 56 enum { 57 TIPC_NOTIFY_NODE_DOWN = (1 << 3), 58 TIPC_NOTIFY_NODE_UP = (1 << 4), 59 TIPC_NOTIFY_LINK_UP = (1 << 6), 60 TIPC_NOTIFY_LINK_DOWN = (1 << 7) 61 }; 62 63 struct tipc_link_entry { 64 struct tipc_link *link; 65 spinlock_t lock; /* per link */ 66 u32 mtu; 67 struct sk_buff_head inputq; 68 struct tipc_media_addr maddr; 69 }; 70 71 struct tipc_bclink_entry { 72 struct tipc_link *link; 73 struct sk_buff_head inputq1; 74 struct sk_buff_head arrvq; 75 struct sk_buff_head inputq2; 76 struct sk_buff_head namedq; 77 }; 78 79 /** 80 * struct tipc_node - TIPC node structure 81 * @addr: network address of node 82 * @ref: reference counter to node object 83 * @lock: rwlock governing access to structure 84 * @net: the applicable net namespace 85 * @hash: links to adjacent nodes in unsorted hash chain 86 * @inputq: pointer to input queue containing messages for msg event 87 * @namedq: pointer to name table input queue with name table messages 88 * @active_links: bearer ids of active links, used as index into links[] array 89 * @links: array containing references to all links to node 90 * @action_flags: bit mask of different types of node actions 91 * @state: connectivity state vs peer node 92 * @sync_point: sequence number where synch/failover is finished 93 * @list: links to adjacent nodes in sorted list of cluster's nodes 94 * @working_links: number of working links to node (both active and standby) 95 * @link_cnt: number of 
links to node 96 * @capabilities: bitmap, indicating peer node's functional capabilities 97 * @signature: node instance identifier 98 * @link_id: local and remote bearer ids of changing link, if any 99 * @publ_list: list of publications 100 * @rcu: rcu struct for tipc_node 101 * @delete_at: indicates the time for deleting a down node 102 */ 103 struct tipc_node { 104 u32 addr; 105 struct kref kref; 106 rwlock_t lock; 107 struct net *net; 108 struct hlist_node hash; 109 int active_links[2]; 110 struct tipc_link_entry links[MAX_BEARERS]; 111 struct tipc_bclink_entry bc_entry; 112 int action_flags; 113 struct list_head list; 114 int state; 115 bool failover_sent; 116 u16 sync_point; 117 int link_cnt; 118 u16 working_links; 119 u16 capabilities; 120 u32 signature; 121 u32 link_id; 122 u8 peer_id[16]; 123 struct list_head publ_list; 124 struct list_head conn_sks; 125 unsigned long keepalive_intv; 126 struct timer_list timer; 127 struct rcu_head rcu; 128 unsigned long delete_at; 129 struct net *peer_net; 130 u32 peer_hash_mix; 131 }; 132 133 /* Node FSM states and events: 134 */ 135 enum { 136 SELF_DOWN_PEER_DOWN = 0xdd, 137 SELF_UP_PEER_UP = 0xaa, 138 SELF_DOWN_PEER_LEAVING = 0xd1, 139 SELF_UP_PEER_COMING = 0xac, 140 SELF_COMING_PEER_UP = 0xca, 141 SELF_LEAVING_PEER_DOWN = 0x1d, 142 NODE_FAILINGOVER = 0xf0, 143 NODE_SYNCHING = 0xcc 144 }; 145 146 enum { 147 SELF_ESTABL_CONTACT_EVT = 0xece, 148 SELF_LOST_CONTACT_EVT = 0x1ce, 149 PEER_ESTABL_CONTACT_EVT = 0x9ece, 150 PEER_LOST_CONTACT_EVT = 0x91ce, 151 NODE_FAILOVER_BEGIN_EVT = 0xfbe, 152 NODE_FAILOVER_END_EVT = 0xfee, 153 NODE_SYNCH_BEGIN_EVT = 0xcbe, 154 NODE_SYNCH_END_EVT = 0xcee 155 }; 156 157 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id, 158 struct sk_buff_head *xmitq, 159 struct tipc_media_addr **maddr); 160 static void tipc_node_link_down(struct tipc_node *n, int bearer_id, 161 bool delete); 162 static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq); 163 static void 
tipc_node_delete(struct tipc_node *node); 164 static void tipc_node_timeout(struct timer_list *t); 165 static void tipc_node_fsm_evt(struct tipc_node *n, int evt); 166 static struct tipc_node *tipc_node_find(struct net *net, u32 addr); 167 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id); 168 static void tipc_node_put(struct tipc_node *node); 169 static bool node_is_up(struct tipc_node *n); 170 static void tipc_node_delete_from_list(struct tipc_node *node); 171 172 struct tipc_sock_conn { 173 u32 port; 174 u32 peer_port; 175 u32 peer_node; 176 struct list_head list; 177 }; 178 179 static struct tipc_link *node_active_link(struct tipc_node *n, int sel) 180 { 181 int bearer_id = n->active_links[sel & 1]; 182 183 if (unlikely(bearer_id == INVALID_BEARER_ID)) 184 return NULL; 185 186 return n->links[bearer_id].link; 187 } 188 189 int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected) 190 { 191 struct tipc_node *n; 192 int bearer_id; 193 unsigned int mtu = MAX_MSG_SIZE; 194 195 n = tipc_node_find(net, addr); 196 if (unlikely(!n)) 197 return mtu; 198 199 /* Allow MAX_MSG_SIZE when building connection oriented message 200 * if they are in the same core network 201 */ 202 if (n->peer_net && connected) { 203 tipc_node_put(n); 204 return mtu; 205 } 206 207 bearer_id = n->active_links[sel & 1]; 208 if (likely(bearer_id != INVALID_BEARER_ID)) 209 mtu = n->links[bearer_id].mtu; 210 tipc_node_put(n); 211 return mtu; 212 } 213 214 bool tipc_node_get_id(struct net *net, u32 addr, u8 *id) 215 { 216 u8 *own_id = tipc_own_id(net); 217 struct tipc_node *n; 218 219 if (!own_id) 220 return true; 221 222 if (addr == tipc_own_addr(net)) { 223 memcpy(id, own_id, TIPC_NODEID_LEN); 224 return true; 225 } 226 n = tipc_node_find(net, addr); 227 if (!n) 228 return false; 229 230 memcpy(id, &n->peer_id, TIPC_NODEID_LEN); 231 tipc_node_put(n); 232 return true; 233 } 234 235 u16 tipc_node_get_capabilities(struct net *net, u32 addr) 236 { 237 struct 
tipc_node *n; 238 u16 caps; 239 240 n = tipc_node_find(net, addr); 241 if (unlikely(!n)) 242 return TIPC_NODE_CAPABILITIES; 243 caps = n->capabilities; 244 tipc_node_put(n); 245 return caps; 246 } 247 248 static void tipc_node_kref_release(struct kref *kref) 249 { 250 struct tipc_node *n = container_of(kref, struct tipc_node, kref); 251 252 kfree(n->bc_entry.link); 253 kfree_rcu(n, rcu); 254 } 255 256 static void tipc_node_put(struct tipc_node *node) 257 { 258 kref_put(&node->kref, tipc_node_kref_release); 259 } 260 261 static void tipc_node_get(struct tipc_node *node) 262 { 263 kref_get(&node->kref); 264 } 265 266 /* 267 * tipc_node_find - locate specified node object, if it exists 268 */ 269 static struct tipc_node *tipc_node_find(struct net *net, u32 addr) 270 { 271 struct tipc_net *tn = tipc_net(net); 272 struct tipc_node *node; 273 unsigned int thash = tipc_hashfn(addr); 274 275 rcu_read_lock(); 276 hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) { 277 if (node->addr != addr) 278 continue; 279 if (!kref_get_unless_zero(&node->kref)) 280 node = NULL; 281 break; 282 } 283 rcu_read_unlock(); 284 return node; 285 } 286 287 /* tipc_node_find_by_id - locate specified node object by its 128-bit id 288 * Note: this function is called only when a discovery request failed 289 * to find the node by its 32-bit id, and is not time critical 290 */ 291 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id) 292 { 293 struct tipc_net *tn = tipc_net(net); 294 struct tipc_node *n; 295 bool found = false; 296 297 rcu_read_lock(); 298 list_for_each_entry_rcu(n, &tn->node_list, list) { 299 read_lock_bh(&n->lock); 300 if (!memcmp(id, n->peer_id, 16) && 301 kref_get_unless_zero(&n->kref)) 302 found = true; 303 read_unlock_bh(&n->lock); 304 if (found) 305 break; 306 } 307 rcu_read_unlock(); 308 return found ? 
n : NULL; 309 } 310 311 static void tipc_node_read_lock(struct tipc_node *n) 312 { 313 read_lock_bh(&n->lock); 314 } 315 316 static void tipc_node_read_unlock(struct tipc_node *n) 317 { 318 read_unlock_bh(&n->lock); 319 } 320 321 static void tipc_node_write_lock(struct tipc_node *n) 322 { 323 write_lock_bh(&n->lock); 324 } 325 326 static void tipc_node_write_unlock_fast(struct tipc_node *n) 327 { 328 write_unlock_bh(&n->lock); 329 } 330 331 static void tipc_node_write_unlock(struct tipc_node *n) 332 { 333 struct net *net = n->net; 334 u32 addr = 0; 335 u32 flags = n->action_flags; 336 u32 link_id = 0; 337 u32 bearer_id; 338 struct list_head *publ_list; 339 340 if (likely(!flags)) { 341 write_unlock_bh(&n->lock); 342 return; 343 } 344 345 addr = n->addr; 346 link_id = n->link_id; 347 bearer_id = link_id & 0xffff; 348 publ_list = &n->publ_list; 349 350 n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP | 351 TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP); 352 353 write_unlock_bh(&n->lock); 354 355 if (flags & TIPC_NOTIFY_NODE_DOWN) 356 tipc_publ_notify(net, publ_list, addr); 357 358 if (flags & TIPC_NOTIFY_NODE_UP) 359 tipc_named_node_up(net, addr); 360 361 if (flags & TIPC_NOTIFY_LINK_UP) { 362 tipc_mon_peer_up(net, addr, bearer_id); 363 tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr, 364 TIPC_NODE_SCOPE, link_id, link_id); 365 } 366 if (flags & TIPC_NOTIFY_LINK_DOWN) { 367 tipc_mon_peer_down(net, addr, bearer_id); 368 tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr, 369 addr, link_id); 370 } 371 } 372 373 static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes) 374 { 375 int net_id = tipc_netid(n->net); 376 struct tipc_net *tn_peer; 377 struct net *tmp; 378 u32 hash_chk; 379 380 if (n->peer_net) 381 return; 382 383 for_each_net_rcu(tmp) { 384 tn_peer = tipc_net(tmp); 385 if (!tn_peer) 386 continue; 387 /* Integrity checking whether node exists in namespace or not */ 388 if (tn_peer->net_id != net_id) 389 continue; 390 if 
(memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN)) 391 continue; 392 hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random); 393 if (hash_mixes ^ hash_chk) 394 continue; 395 n->peer_net = tmp; 396 n->peer_hash_mix = hash_mixes; 397 break; 398 } 399 } 400 401 static struct tipc_node *tipc_node_create(struct net *net, u32 addr, 402 u8 *peer_id, u16 capabilities, 403 u32 signature, u32 hash_mixes) 404 { 405 struct tipc_net *tn = net_generic(net, tipc_net_id); 406 struct tipc_node *n, *temp_node; 407 struct tipc_link *l; 408 int bearer_id; 409 int i; 410 411 spin_lock_bh(&tn->node_list_lock); 412 n = tipc_node_find(net, addr); 413 if (n) { 414 if (n->peer_hash_mix ^ hash_mixes) 415 tipc_node_assign_peer_net(n, hash_mixes); 416 if (n->capabilities == capabilities) 417 goto exit; 418 /* Same node may come back with new capabilities */ 419 tipc_node_write_lock(n); 420 n->capabilities = capabilities; 421 for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) { 422 l = n->links[bearer_id].link; 423 if (l) 424 tipc_link_update_caps(l, capabilities); 425 } 426 tipc_node_write_unlock_fast(n); 427 428 /* Calculate cluster capabilities */ 429 tn->capabilities = TIPC_NODE_CAPABILITIES; 430 list_for_each_entry_rcu(temp_node, &tn->node_list, list) { 431 tn->capabilities &= temp_node->capabilities; 432 } 433 434 goto exit; 435 } 436 n = kzalloc(sizeof(*n), GFP_ATOMIC); 437 if (!n) { 438 pr_warn("Node creation failed, no memory\n"); 439 goto exit; 440 } 441 n->addr = addr; 442 memcpy(&n->peer_id, peer_id, 16); 443 n->net = net; 444 n->peer_net = NULL; 445 n->peer_hash_mix = 0; 446 /* Assign kernel local namespace if exists */ 447 tipc_node_assign_peer_net(n, hash_mixes); 448 n->capabilities = capabilities; 449 kref_init(&n->kref); 450 rwlock_init(&n->lock); 451 INIT_HLIST_NODE(&n->hash); 452 INIT_LIST_HEAD(&n->list); 453 INIT_LIST_HEAD(&n->publ_list); 454 INIT_LIST_HEAD(&n->conn_sks); 455 skb_queue_head_init(&n->bc_entry.namedq); 456 skb_queue_head_init(&n->bc_entry.inputq1); 457 
__skb_queue_head_init(&n->bc_entry.arrvq); 458 skb_queue_head_init(&n->bc_entry.inputq2); 459 for (i = 0; i < MAX_BEARERS; i++) 460 spin_lock_init(&n->links[i].lock); 461 n->state = SELF_DOWN_PEER_LEAVING; 462 n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER); 463 n->signature = INVALID_NODE_SIG; 464 n->active_links[0] = INVALID_BEARER_ID; 465 n->active_links[1] = INVALID_BEARER_ID; 466 if (!tipc_link_bc_create(net, tipc_own_addr(net), 467 addr, U16_MAX, 468 tipc_link_window(tipc_bc_sndlink(net)), 469 n->capabilities, 470 &n->bc_entry.inputq1, 471 &n->bc_entry.namedq, 472 tipc_bc_sndlink(net), 473 &n->bc_entry.link)) { 474 pr_warn("Broadcast rcv link creation failed, no memory\n"); 475 if (n->peer_net) { 476 n->peer_net = NULL; 477 n->peer_hash_mix = 0; 478 } 479 kfree(n); 480 n = NULL; 481 goto exit; 482 } 483 tipc_node_get(n); 484 timer_setup(&n->timer, tipc_node_timeout, 0); 485 n->keepalive_intv = U32_MAX; 486 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]); 487 list_for_each_entry_rcu(temp_node, &tn->node_list, list) { 488 if (n->addr < temp_node->addr) 489 break; 490 } 491 list_add_tail_rcu(&n->list, &temp_node->list); 492 /* Calculate cluster capabilities */ 493 tn->capabilities = TIPC_NODE_CAPABILITIES; 494 list_for_each_entry_rcu(temp_node, &tn->node_list, list) { 495 tn->capabilities &= temp_node->capabilities; 496 } 497 trace_tipc_node_create(n, true, " "); 498 exit: 499 spin_unlock_bh(&tn->node_list_lock); 500 return n; 501 } 502 503 static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l) 504 { 505 unsigned long tol = tipc_link_tolerance(l); 506 unsigned long intv = ((tol / 4) > 500) ? 
500 : tol / 4; 507 508 /* Link with lowest tolerance determines timer interval */ 509 if (intv < n->keepalive_intv) 510 n->keepalive_intv = intv; 511 512 /* Ensure link's abort limit corresponds to current tolerance */ 513 tipc_link_set_abort_limit(l, tol / n->keepalive_intv); 514 } 515 516 static void tipc_node_delete_from_list(struct tipc_node *node) 517 { 518 list_del_rcu(&node->list); 519 hlist_del_rcu(&node->hash); 520 tipc_node_put(node); 521 } 522 523 static void tipc_node_delete(struct tipc_node *node) 524 { 525 trace_tipc_node_delete(node, true, " "); 526 tipc_node_delete_from_list(node); 527 528 del_timer_sync(&node->timer); 529 tipc_node_put(node); 530 } 531 532 void tipc_node_stop(struct net *net) 533 { 534 struct tipc_net *tn = tipc_net(net); 535 struct tipc_node *node, *t_node; 536 537 spin_lock_bh(&tn->node_list_lock); 538 list_for_each_entry_safe(node, t_node, &tn->node_list, list) 539 tipc_node_delete(node); 540 spin_unlock_bh(&tn->node_list_lock); 541 } 542 543 void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr) 544 { 545 struct tipc_node *n; 546 547 if (in_own_node(net, addr)) 548 return; 549 550 n = tipc_node_find(net, addr); 551 if (!n) { 552 pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr); 553 return; 554 } 555 tipc_node_write_lock(n); 556 list_add_tail(subscr, &n->publ_list); 557 tipc_node_write_unlock_fast(n); 558 tipc_node_put(n); 559 } 560 561 void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr) 562 { 563 struct tipc_node *n; 564 565 if (in_own_node(net, addr)) 566 return; 567 568 n = tipc_node_find(net, addr); 569 if (!n) { 570 pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr); 571 return; 572 } 573 tipc_node_write_lock(n); 574 list_del_init(subscr); 575 tipc_node_write_unlock_fast(n); 576 tipc_node_put(n); 577 } 578 579 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port) 580 { 581 struct tipc_node *node; 582 struct tipc_sock_conn 
*conn; 583 int err = 0; 584 585 if (in_own_node(net, dnode)) 586 return 0; 587 588 node = tipc_node_find(net, dnode); 589 if (!node) { 590 pr_warn("Connecting sock to node 0x%x failed\n", dnode); 591 return -EHOSTUNREACH; 592 } 593 conn = kmalloc(sizeof(*conn), GFP_ATOMIC); 594 if (!conn) { 595 err = -EHOSTUNREACH; 596 goto exit; 597 } 598 conn->peer_node = dnode; 599 conn->port = port; 600 conn->peer_port = peer_port; 601 602 tipc_node_write_lock(node); 603 list_add_tail(&conn->list, &node->conn_sks); 604 tipc_node_write_unlock(node); 605 exit: 606 tipc_node_put(node); 607 return err; 608 } 609 610 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port) 611 { 612 struct tipc_node *node; 613 struct tipc_sock_conn *conn, *safe; 614 615 if (in_own_node(net, dnode)) 616 return; 617 618 node = tipc_node_find(net, dnode); 619 if (!node) 620 return; 621 622 tipc_node_write_lock(node); 623 list_for_each_entry_safe(conn, safe, &node->conn_sks, list) { 624 if (port != conn->port) 625 continue; 626 list_del(&conn->list); 627 kfree(conn); 628 } 629 tipc_node_write_unlock(node); 630 tipc_node_put(node); 631 } 632 633 static void tipc_node_clear_links(struct tipc_node *node) 634 { 635 int i; 636 637 for (i = 0; i < MAX_BEARERS; i++) { 638 struct tipc_link_entry *le = &node->links[i]; 639 640 if (le->link) { 641 kfree(le->link); 642 le->link = NULL; 643 node->link_cnt--; 644 } 645 } 646 } 647 648 /* tipc_node_cleanup - delete nodes that does not 649 * have active links for NODE_CLEANUP_AFTER time 650 */ 651 static bool tipc_node_cleanup(struct tipc_node *peer) 652 { 653 struct tipc_node *temp_node; 654 struct tipc_net *tn = tipc_net(peer->net); 655 bool deleted = false; 656 657 /* If lock held by tipc_node_stop() the node will be deleted anyway */ 658 if (!spin_trylock_bh(&tn->node_list_lock)) 659 return false; 660 661 tipc_node_write_lock(peer); 662 663 if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) { 664 tipc_node_clear_links(peer); 665 
tipc_node_delete_from_list(peer); 666 deleted = true; 667 } 668 tipc_node_write_unlock(peer); 669 670 /* Calculate cluster capabilities */ 671 tn->capabilities = TIPC_NODE_CAPABILITIES; 672 list_for_each_entry_rcu(temp_node, &tn->node_list, list) { 673 tn->capabilities &= temp_node->capabilities; 674 } 675 676 spin_unlock_bh(&tn->node_list_lock); 677 return deleted; 678 } 679 680 /* tipc_node_timeout - handle expiration of node timer 681 */ 682 static void tipc_node_timeout(struct timer_list *t) 683 { 684 struct tipc_node *n = from_timer(n, t, timer); 685 struct tipc_link_entry *le; 686 struct sk_buff_head xmitq; 687 int remains = n->link_cnt; 688 int bearer_id; 689 int rc = 0; 690 691 trace_tipc_node_timeout(n, false, " "); 692 if (!node_is_up(n) && tipc_node_cleanup(n)) { 693 /*Removing the reference of Timer*/ 694 tipc_node_put(n); 695 return; 696 } 697 698 __skb_queue_head_init(&xmitq); 699 700 /* Initial node interval to value larger (10 seconds), then it will be 701 * recalculated with link lowest tolerance 702 */ 703 tipc_node_read_lock(n); 704 n->keepalive_intv = 10000; 705 tipc_node_read_unlock(n); 706 for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) { 707 tipc_node_read_lock(n); 708 le = &n->links[bearer_id]; 709 if (le->link) { 710 spin_lock_bh(&le->lock); 711 /* Link tolerance may change asynchronously: */ 712 tipc_node_calculate_timer(n, le->link); 713 rc = tipc_link_timeout(le->link, &xmitq); 714 spin_unlock_bh(&le->lock); 715 remains--; 716 } 717 tipc_node_read_unlock(n); 718 tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr); 719 if (rc & TIPC_LINK_DOWN_EVT) 720 tipc_node_link_down(n, bearer_id, false); 721 } 722 mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv)); 723 } 724 725 /** 726 * __tipc_node_link_up - handle addition of link 727 * Node lock must be held by caller 728 * Link becomes active (alone or shared) or standby, depending on its priority. 
729 */ 730 static void __tipc_node_link_up(struct tipc_node *n, int bearer_id, 731 struct sk_buff_head *xmitq) 732 { 733 int *slot0 = &n->active_links[0]; 734 int *slot1 = &n->active_links[1]; 735 struct tipc_link *ol = node_active_link(n, 0); 736 struct tipc_link *nl = n->links[bearer_id].link; 737 738 if (!nl || tipc_link_is_up(nl)) 739 return; 740 741 tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT); 742 if (!tipc_link_is_up(nl)) 743 return; 744 745 n->working_links++; 746 n->action_flags |= TIPC_NOTIFY_LINK_UP; 747 n->link_id = tipc_link_id(nl); 748 749 /* Leave room for tunnel header when returning 'mtu' to users: */ 750 n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE; 751 752 tipc_bearer_add_dest(n->net, bearer_id, n->addr); 753 tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id); 754 755 pr_debug("Established link <%s> on network plane %c\n", 756 tipc_link_name(nl), tipc_link_plane(nl)); 757 trace_tipc_node_link_up(n, true, " "); 758 759 /* Ensure that a STATE message goes first */ 760 tipc_link_build_state_msg(nl, xmitq); 761 762 /* First link? 
=> give it both slots */ 763 if (!ol) { 764 *slot0 = bearer_id; 765 *slot1 = bearer_id; 766 tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT); 767 n->action_flags |= TIPC_NOTIFY_NODE_UP; 768 tipc_link_set_active(nl, true); 769 tipc_bcast_add_peer(n->net, nl, xmitq); 770 return; 771 } 772 773 /* Second link => redistribute slots */ 774 if (tipc_link_prio(nl) > tipc_link_prio(ol)) { 775 pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol)); 776 *slot0 = bearer_id; 777 *slot1 = bearer_id; 778 tipc_link_set_active(nl, true); 779 tipc_link_set_active(ol, false); 780 } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) { 781 tipc_link_set_active(nl, true); 782 *slot1 = bearer_id; 783 } else { 784 pr_debug("New link <%s> is standby\n", tipc_link_name(nl)); 785 } 786 787 /* Prepare synchronization with first link */ 788 tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq); 789 } 790 791 /** 792 * tipc_node_link_up - handle addition of link 793 * 794 * Link becomes active (alone or shared) or standby, depending on its priority. 795 */ 796 static void tipc_node_link_up(struct tipc_node *n, int bearer_id, 797 struct sk_buff_head *xmitq) 798 { 799 struct tipc_media_addr *maddr; 800 801 tipc_node_write_lock(n); 802 __tipc_node_link_up(n, bearer_id, xmitq); 803 maddr = &n->links[bearer_id].maddr; 804 tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr); 805 tipc_node_write_unlock(n); 806 } 807 808 /** 809 * tipc_node_link_failover() - start failover in case "half-failover" 810 * 811 * This function is only called in a very special situation where link 812 * failover can be already started on peer node but not on this node. 813 * This can happen when e.g. 814 * 1. Both links <1A-2A>, <1B-2B> down 815 * 2. Link endpoint 2A up, but 1A still down (e.g. due to network 816 * disturbance, wrong session, etc.) 817 * 3. Link <1B-2B> up 818 * 4. Link endpoint 2A down (e.g. due to link tolerance timeout) 819 * 5. 
Node 2 starts failover onto link <1B-2B> 820 * 821 * ==> Node 1 does never start link/node failover! 822 * 823 * @n: tipc node structure 824 * @l: link peer endpoint failingover (- can be NULL) 825 * @tnl: tunnel link 826 * @xmitq: queue for messages to be xmited on tnl link later 827 */ 828 static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l, 829 struct tipc_link *tnl, 830 struct sk_buff_head *xmitq) 831 { 832 /* Avoid to be "self-failover" that can never end */ 833 if (!tipc_link_is_up(tnl)) 834 return; 835 836 /* Don't rush, failure link may be in the process of resetting */ 837 if (l && !tipc_link_is_reset(l)) 838 return; 839 840 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT); 841 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT); 842 843 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1); 844 tipc_link_failover_prepare(l, tnl, xmitq); 845 846 if (l) 847 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); 848 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT); 849 } 850 851 /** 852 * __tipc_node_link_down - handle loss of link 853 */ 854 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id, 855 struct sk_buff_head *xmitq, 856 struct tipc_media_addr **maddr) 857 { 858 struct tipc_link_entry *le = &n->links[*bearer_id]; 859 int *slot0 = &n->active_links[0]; 860 int *slot1 = &n->active_links[1]; 861 int i, highest = 0, prio; 862 struct tipc_link *l, *_l, *tnl; 863 864 l = n->links[*bearer_id].link; 865 if (!l || tipc_link_is_reset(l)) 866 return; 867 868 n->working_links--; 869 n->action_flags |= TIPC_NOTIFY_LINK_DOWN; 870 n->link_id = tipc_link_id(l); 871 872 tipc_bearer_remove_dest(n->net, *bearer_id, n->addr); 873 874 pr_debug("Lost link <%s> on network plane %c\n", 875 tipc_link_name(l), tipc_link_plane(l)); 876 877 /* Select new active link if any available */ 878 *slot0 = INVALID_BEARER_ID; 879 *slot1 = INVALID_BEARER_ID; 880 for (i = 0; i < MAX_BEARERS; i++) { 881 _l = n->links[i].link; 882 if (!_l || !tipc_link_is_up(_l)) 883 
continue; 884 if (_l == l) 885 continue; 886 prio = tipc_link_prio(_l); 887 if (prio < highest) 888 continue; 889 if (prio > highest) { 890 highest = prio; 891 *slot0 = i; 892 *slot1 = i; 893 continue; 894 } 895 *slot1 = i; 896 } 897 898 if (!node_is_up(n)) { 899 if (tipc_link_peer_is_down(l)) 900 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT); 901 tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT); 902 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!"); 903 tipc_link_fsm_evt(l, LINK_RESET_EVT); 904 tipc_link_reset(l); 905 tipc_link_build_reset_msg(l, xmitq); 906 *maddr = &n->links[*bearer_id].maddr; 907 node_lost_contact(n, &le->inputq); 908 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id); 909 return; 910 } 911 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id); 912 913 /* There is still a working link => initiate failover */ 914 *bearer_id = n->active_links[0]; 915 tnl = n->links[*bearer_id].link; 916 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT); 917 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT); 918 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1); 919 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq); 920 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!"); 921 tipc_link_reset(l); 922 tipc_link_fsm_evt(l, LINK_RESET_EVT); 923 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); 924 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT); 925 *maddr = &n->links[*bearer_id].maddr; 926 } 927 928 static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete) 929 { 930 struct tipc_link_entry *le = &n->links[bearer_id]; 931 struct tipc_media_addr *maddr = NULL; 932 struct tipc_link *l = le->link; 933 int old_bearer_id = bearer_id; 934 struct sk_buff_head xmitq; 935 936 if (!l) 937 return; 938 939 __skb_queue_head_init(&xmitq); 940 941 tipc_node_write_lock(n); 942 if (!tipc_link_is_establishing(l)) { 943 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr); 944 } else { 945 /* Defuse pending tipc_node_link_up() */ 946 tipc_link_reset(l); 947 
tipc_link_fsm_evt(l, LINK_RESET_EVT); 948 } 949 if (delete) { 950 kfree(l); 951 le->link = NULL; 952 n->link_cnt--; 953 } 954 trace_tipc_node_link_down(n, true, "node link down or deleted!"); 955 tipc_node_write_unlock(n); 956 if (delete) 957 tipc_mon_remove_peer(n->net, n->addr, old_bearer_id); 958 if (!skb_queue_empty(&xmitq)) 959 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr); 960 tipc_sk_rcv(n->net, &le->inputq); 961 } 962 963 static bool node_is_up(struct tipc_node *n) 964 { 965 return n->active_links[0] != INVALID_BEARER_ID; 966 } 967 968 bool tipc_node_is_up(struct net *net, u32 addr) 969 { 970 struct tipc_node *n; 971 bool retval = false; 972 973 if (in_own_node(net, addr)) 974 return true; 975 976 n = tipc_node_find(net, addr); 977 if (!n) 978 return false; 979 retval = node_is_up(n); 980 tipc_node_put(n); 981 return retval; 982 } 983 984 static u32 tipc_node_suggest_addr(struct net *net, u32 addr) 985 { 986 struct tipc_node *n; 987 988 addr ^= tipc_net(net)->random; 989 while ((n = tipc_node_find(net, addr))) { 990 tipc_node_put(n); 991 addr++; 992 } 993 return addr; 994 } 995 996 /* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not 997 * Returns suggested address if any, otherwise 0 998 */ 999 u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr) 1000 { 1001 struct tipc_net *tn = tipc_net(net); 1002 struct tipc_node *n; 1003 1004 /* Suggest new address if some other peer is using this one */ 1005 n = tipc_node_find(net, addr); 1006 if (n) { 1007 if (!memcmp(n->peer_id, id, NODE_ID_LEN)) 1008 addr = 0; 1009 tipc_node_put(n); 1010 if (!addr) 1011 return 0; 1012 return tipc_node_suggest_addr(net, addr); 1013 } 1014 1015 /* Suggest previously used address if peer is known */ 1016 n = tipc_node_find_by_id(net, id); 1017 if (n) { 1018 addr = n->addr; 1019 tipc_node_put(n); 1020 return addr; 1021 } 1022 1023 /* Even this node may be in conflict */ 1024 if (tn->trial_addr == addr) 1025 return tipc_node_suggest_addr(net, addr); 
1026 1027 return 0; 1028 } 1029 1030 void tipc_node_check_dest(struct net *net, u32 addr, 1031 u8 *peer_id, struct tipc_bearer *b, 1032 u16 capabilities, u32 signature, u32 hash_mixes, 1033 struct tipc_media_addr *maddr, 1034 bool *respond, bool *dupl_addr) 1035 { 1036 struct tipc_node *n; 1037 struct tipc_link *l; 1038 struct tipc_link_entry *le; 1039 bool addr_match = false; 1040 bool sign_match = false; 1041 bool link_up = false; 1042 bool accept_addr = false; 1043 bool reset = true; 1044 char *if_name; 1045 unsigned long intv; 1046 u16 session; 1047 1048 *dupl_addr = false; 1049 *respond = false; 1050 1051 n = tipc_node_create(net, addr, peer_id, capabilities, signature, 1052 hash_mixes); 1053 if (!n) 1054 return; 1055 1056 tipc_node_write_lock(n); 1057 1058 le = &n->links[b->identity]; 1059 1060 /* Prepare to validate requesting node's signature and media address */ 1061 l = le->link; 1062 link_up = l && tipc_link_is_up(l); 1063 addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr)); 1064 sign_match = (signature == n->signature); 1065 1066 /* These three flags give us eight permutations: */ 1067 1068 if (sign_match && addr_match && link_up) { 1069 /* All is fine. Do nothing. */ 1070 reset = false; 1071 } else if (sign_match && addr_match && !link_up) { 1072 /* Respond. The link will come up in due time */ 1073 *respond = true; 1074 } else if (sign_match && !addr_match && link_up) { 1075 /* Peer has changed i/f address without rebooting. 1076 * If so, the link will reset soon, and the next 1077 * discovery will be accepted. So we can ignore it. 1078 * It may also be an cloned or malicious peer having 1079 * chosen the same node address and signature as an 1080 * existing one. 1081 * Ignore requests until the link goes down, if ever. 1082 */ 1083 *dupl_addr = true; 1084 } else if (sign_match && !addr_match && !link_up) { 1085 /* Peer link has changed i/f address without rebooting. 
		 * It may also be a cloned or malicious peer; we can't
		 * distinguish between the two.
		 * The signature is correct, so we must accept.
		 */
		accept_addr = true;
		*respond = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Peer node rebooted. Two possibilities:
		 *  - Delayed re-discovery; this link endpoint has already
		 *    reset and re-established contact with the peer, before
		 *    receiving a discovery message from that node.
		 *    (The peer happened to receive one from this node first).
		 *  - The peer came back so fast that our side has not
		 *    discovered it yet. Probing from this side will soon
		 *    reset the link, since there can be no working link
		 *    endpoint at the peer end, and the link will re-establish.
		 *    Accept the signature, since it comes from a known peer.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/* The peer node has rebooted.
		 * Accept signature, since it is a known peer.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Peer rebooted with new address, or a new/duplicate peer.
		 * Ignore until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with new address, or it is a new peer.
		 * Accept signature and address.
		 */
		n->signature = signature;
		accept_addr = true;
		*respond = true;
	}

	if (!accept_addr)
		goto exit;

	/* Now create new link if not already existing */
	if (!l) {
		/* At most two links (one per bearer plane) per peer */
		if (n->link_cnt == 2)
			goto exit;

		if_name = strchr(b->name, ':') + 1;
		get_random_bytes(&session, sizeof(u16));
		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
				      b->net_plane, b->mtu, b->priority,
				      b->window, session,
				      tipc_own_addr(net), addr, peer_id,
				      n->capabilities,
				      tipc_bc_sndlink(n->net), n->bc_entry.link,
				      &le->inputq,
				      &n->bc_entry.namedq, &l)) {
			*respond = false;
			goto exit;
		}
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		if (n->state == NODE_FAILINGOVER)
			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
		le->link = l;
		n->link_cnt++;
		tipc_node_calculate_timer(n, l);
		/* First link: arm the keepalive timer, taking a node ref
		 * if the timer was not already pending
		 */
		if (n->link_cnt == 1) {
			intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
			if (!mod_timer(&n->timer, intv))
				tipc_node_get(n);
		}
	}
	memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
	tipc_node_write_unlock(n);
	if (reset && l && !tipc_link_is_reset(l))
		tipc_node_link_down(n, b->identity, false);
	tipc_node_put(n);
}

/* tipc_node_delete_links - take down the link on @bearer_id towards
 * every known peer node and delete it
 */
void tipc_node_delete_links(struct net *net, int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_link_down(n, bearer_id, true);
	}
	rcu_read_unlock();
}

/* tipc_node_reset_links - reset (but do not delete) all links to @n */
static void tipc_node_reset_links(struct tipc_node *n)
{
	int i;

	pr_warn("Resetting all links to %x\n", n->addr);

	trace_tipc_node_reset_links(n, true, " ");
	for (i = 0; i < MAX_BEARERS; i++) {
		tipc_node_link_down(n, i, false);
	}
}

/*
 * tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
	n->state = state;
	return;

illegal_evt:
	/* Illegal events do not change state; they are only logged/traced */
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
}

/* node_lost_contact - all links to the peer are gone; tear down per-peer
 * state and queue connection-abort messages to affected local sockets
 */
static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %x\n", n->addr);
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	trace_tipc_node_lost_contact(n, true, " ");

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;

	/* Drop any cached reference to a peer netns (container peer) */
	if (n->peer_net) {
		n->peer_net = NULL;
		n->peer_hash_mix = 0;
	}
	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}

/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @bearer_id: id of the bearer
 * @node: peer node address
 * @linkname: link name
output buffer 1420 * 1421 * Returns 0 on success 1422 */ 1423 int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr, 1424 char *linkname, size_t len) 1425 { 1426 struct tipc_link *link; 1427 int err = -EINVAL; 1428 struct tipc_node *node = tipc_node_find(net, addr); 1429 1430 if (!node) 1431 return err; 1432 1433 if (bearer_id >= MAX_BEARERS) 1434 goto exit; 1435 1436 tipc_node_read_lock(node); 1437 link = node->links[bearer_id].link; 1438 if (link) { 1439 strncpy(linkname, tipc_link_name(link), len); 1440 err = 0; 1441 } 1442 tipc_node_read_unlock(node); 1443 exit: 1444 tipc_node_put(node); 1445 return err; 1446 } 1447 1448 /* Caller should hold node lock for the passed node */ 1449 static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node) 1450 { 1451 void *hdr; 1452 struct nlattr *attrs; 1453 1454 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 1455 NLM_F_MULTI, TIPC_NL_NODE_GET); 1456 if (!hdr) 1457 return -EMSGSIZE; 1458 1459 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE); 1460 if (!attrs) 1461 goto msg_full; 1462 1463 if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr)) 1464 goto attr_msg_full; 1465 if (node_is_up(node)) 1466 if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP)) 1467 goto attr_msg_full; 1468 1469 nla_nest_end(msg->skb, attrs); 1470 genlmsg_end(msg->skb, hdr); 1471 1472 return 0; 1473 1474 attr_msg_full: 1475 nla_nest_cancel(msg->skb, attrs); 1476 msg_full: 1477 genlmsg_cancel(msg->skb, hdr); 1478 1479 return -EMSGSIZE; 1480 } 1481 1482 static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list) 1483 { 1484 struct tipc_msg *hdr = buf_msg(skb_peek(list)); 1485 struct sk_buff_head inputq; 1486 1487 switch (msg_user(hdr)) { 1488 case TIPC_LOW_IMPORTANCE: 1489 case TIPC_MEDIUM_IMPORTANCE: 1490 case TIPC_HIGH_IMPORTANCE: 1491 case TIPC_CRITICAL_IMPORTANCE: 1492 if (msg_connected(hdr) || msg_named(hdr)) { 1493 tipc_loopback_trace(peer_net, list); 1494 
spin_lock_init(&list->lock); 1495 tipc_sk_rcv(peer_net, list); 1496 return; 1497 } 1498 if (msg_mcast(hdr)) { 1499 tipc_loopback_trace(peer_net, list); 1500 skb_queue_head_init(&inputq); 1501 tipc_sk_mcast_rcv(peer_net, list, &inputq); 1502 __skb_queue_purge(list); 1503 skb_queue_purge(&inputq); 1504 return; 1505 } 1506 return; 1507 case MSG_FRAGMENTER: 1508 if (tipc_msg_assemble(list)) { 1509 tipc_loopback_trace(peer_net, list); 1510 skb_queue_head_init(&inputq); 1511 tipc_sk_mcast_rcv(peer_net, list, &inputq); 1512 __skb_queue_purge(list); 1513 skb_queue_purge(&inputq); 1514 } 1515 return; 1516 case GROUP_PROTOCOL: 1517 case CONN_MANAGER: 1518 tipc_loopback_trace(peer_net, list); 1519 spin_lock_init(&list->lock); 1520 tipc_sk_rcv(peer_net, list); 1521 return; 1522 case LINK_PROTOCOL: 1523 case NAME_DISTRIBUTOR: 1524 case TUNNEL_PROTOCOL: 1525 case BCAST_PROTOCOL: 1526 return; 1527 default: 1528 return; 1529 }; 1530 } 1531 1532 /** 1533 * tipc_node_xmit() is the general link level function for message sending 1534 * @net: the applicable net namespace 1535 * @list: chain of buffers containing message 1536 * @dnode: address of destination node 1537 * @selector: a number used for deterministic link selection 1538 * Consumes the buffer chain. 
 * Returns 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUF
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link_entry *le = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	bool node_up = false;
	int bearer_id;
	int rc;

	/* Local destination: deliver straight to the socket layer */
	if (in_own_node(net, dnode)) {
		tipc_loopback_trace(net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(net, list);
		return 0;
	}

	n = tipc_node_find(net, dnode);
	if (unlikely(!n)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	tipc_node_read_lock(n);
	node_up = node_is_up(n);
	if (node_up && n->peer_net && check_net(n->peer_net)) {
		/* xmit inner linux container */
		tipc_lxc_xmit(n->peer_net, list);
		if (likely(skb_queue_empty(list))) {
			tipc_node_read_unlock(n);
			tipc_node_put(n);
			return 0;
		}
	}

	bearer_id = n->active_links[selector & 1];
	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
		tipc_node_read_unlock(n);
		tipc_node_put(n);
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	__skb_queue_head_init(&xmitq);
	le = &n->links[bearer_id];
	/* Link xmit is serialized by the per-link-entry spinlock */
	spin_lock_bh(&le->lock);
	rc = tipc_link_xmit(le->link, list, &xmitq);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(n);

	if (unlikely(rc == -ENOBUFS))
		tipc_node_link_down(n, bearer_id, false);
	else
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	tipc_node_put(n);

	return rc;
}

/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this functon are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	__skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	tipc_node_xmit(net, &head, dnode, selector);
	return 0;
}

/* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
 */
int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	u32 selector, dnode;

	while ((skb = __skb_dequeue(xmitq))) {
		selector = msg_origport(buf_msg(skb));
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, selector);
	}
	return 0;
}

/* tipc_node_broadcast - replicate @skb as unicast to every node that is up.
 * Consumes @skb.
 */
void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
{
	struct sk_buff *txskb;
	struct tipc_node *n;
	u32 dst;

	rcu_read_lock();
	list_for_each_entry_rcu(n, tipc_nodes(net), list) {
		dst = n->addr;
		if (in_own_node(net, dst))
			continue;
		if (!node_is_up(n))
			continue;
		txskb = pskb_copy(skb, GFP_ATOMIC);
		if (!txskb)
			break;
		msg_set_destnode(buf_msg(txskb), dst);
		tipc_node_xmit_skb(net, txskb, dst, 0);
	}
	rcu_read_unlock();

	kfree_skb(skb);
}

/* tipc_node_mcast_rcv - move pending multicast buffers from inputq1 to
 * arrvq and hand them to the socket multicast receiver
 */
static void tipc_node_mcast_rcv(struct tipc_node *n)
{
	struct tipc_bclink_entry *be = &n->bc_entry;

	/* 'arrvq' is under inputq2's lock protection */
	spin_lock_bh(&be->inputq2.lock);
	spin_lock_bh(&be->inputq1.lock);
	skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
	spin_unlock_bh(&be->inputq1.lock);
	spin_unlock_bh(&be->inputq2.lock);
	tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
}

/* tipc_node_bc_sync_rcv - synchronize broadcast link state with peer,
 * based on a received unicast LINK_PROTOCOL header
 */
static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct
	tipc_link *ucl;
	int rc;

	rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr);

	if (rc & TIPC_LINK_DOWN_EVT) {
		tipc_node_reset_links(n);
		return;
	}

	if (!(rc & TIPC_LINK_SND_STATE))
		return;

	/* If probe message, a STATE response will be sent anyway */
	if (msg_probe(hdr))
		return;

	/* Produce a STATE message carrying broadcast NACK */
	tipc_node_read_lock(n);
	ucl = n->links[bearer_id].link;
	if (ucl)
		tipc_link_build_state_msg(ucl, xmitq);
	tipc_node_read_unlock(n);
}

/**
 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer_id: id of bearer message arrived on
 *
 * Invoked with no locks held.
 */
static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
{
	int rc;
	struct sk_buff_head xmitq;
	struct tipc_bclink_entry *be;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	u32 dnode = msg_destnode(hdr);
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	/* If NACK for other node, let rcv link for that node peek into it */
	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
		n = tipc_node_find(net, dnode);
	else
		n = tipc_node_find(net, msg_prevnode(hdr));
	if (!n) {
		kfree_skb(skb);
		return;
	}
	be = &n->bc_entry;
	le = &n->links[bearer_id];

	rc = tipc_bcast_rcv(net, be->link, skb);

	/* Broadcast ACKs are sent on a unicast link */
	if (rc & TIPC_LINK_SND_STATE) {
		tipc_node_read_lock(n);
		tipc_link_build_state_msg(le->link, &xmitq);
		tipc_node_read_unlock(n);
	}

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	if (!skb_queue_empty(&be->inputq1))
		tipc_node_mcast_rcv(n);

	/* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
	if (!skb_queue_empty(&n->bc_entry.namedq))
		tipc_named_rcv(net, &n->bc_entry.namedq);

	/* If reassembly or retransmission failure => reset all links to peer */
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_node_reset_links(n);

	tipc_node_put(n);
}

/**
 * tipc_node_check_state - check and if necessary update node state
 * @skb: TIPC packet
 * @bearer_id: identity of bearer delivering the packet
 * Returns true if state and msg are ok, otherwise false
 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int mtyp = msg_type(hdr);
	u16 oseqno = msg_seqno(hdr);
	u16 exp_pkts = msg_msgcnt(hdr);
	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
	int state = n->state;
	struct tipc_link *l, *tnl, *pl = NULL;
	struct tipc_media_addr *maddr;
	int pb_id;

	if (trace_tipc_node_check_state_enabled()) {
		trace_tipc_skb_dump(skb, false, "skb for node state check");
		trace_tipc_node_check_state(n, true, " ");
	}
	l = n->links[bearer_id].link;
	if (!l)
		return false;
	rcv_nxt = tipc_link_rcv_nxt(l);

	/* Fast path: normal traffic in steady state needs no state change */
	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
		return true;

	/* Find parallel link, if any */
	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
		if ((pb_id != bearer_id) && n->links[pb_id].link) {
			pl = n->links[pb_id].link;
			break;
		}
	}

	if (!tipc_link_validate_msg(l, hdr)) {
		trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
		return false;
	}

	/* Check and update node accessibility if applicable */
	if (state == SELF_UP_PEER_COMING) {
		if (!tipc_link_is_up(l))
			return true;
		if (!msg_peer_link_is_up(hdr))
			return true;
		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
	}

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_node_is_up(hdr))
			return false;
		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		return true;
	}

	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	/* Ignore duplicate packets */
	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
		return true;

	/* Initiate or update failover mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
		syncpt = oseqno + exp_pkts - 1;
		if (pl && !tipc_link_is_reset(pl)) {
			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
			trace_tipc_node_link_down(n, true,
						  "node link down <- failover!");
			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
							tipc_link_inputq(l));
		}

		/* If parallel link was already down, and this happened before
		 * the tunnel link came up, node failover was never started.
		 * Ensure that a FAILOVER_MSG is sent to get peer out of
		 * NODE_FAILINGOVER state, also this node must accept
		 * TUNNEL_MSGs from peer.
1844 */ 1845 if (n->state != NODE_FAILINGOVER) 1846 tipc_node_link_failover(n, pl, l, xmitq); 1847 1848 /* If pkts arrive out of order, use lowest calculated syncpt */ 1849 if (less(syncpt, n->sync_point)) 1850 n->sync_point = syncpt; 1851 } 1852 1853 /* Open parallel link when tunnel link reaches synch point */ 1854 if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) { 1855 if (!more(rcv_nxt, n->sync_point)) 1856 return true; 1857 tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT); 1858 if (pl) 1859 tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT); 1860 return true; 1861 } 1862 1863 /* No synching needed if only one link */ 1864 if (!pl || !tipc_link_is_up(pl)) 1865 return true; 1866 1867 /* Initiate synch mode if applicable */ 1868 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) { 1869 if (n->capabilities & TIPC_TUNNEL_ENHANCED) 1870 syncpt = msg_syncpt(hdr); 1871 else 1872 syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1; 1873 if (!tipc_link_is_up(l)) 1874 __tipc_node_link_up(n, bearer_id, xmitq); 1875 if (n->state == SELF_UP_PEER_UP) { 1876 n->sync_point = syncpt; 1877 tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT); 1878 tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT); 1879 } 1880 } 1881 1882 /* Open tunnel link when parallel link reaches synch point */ 1883 if (n->state == NODE_SYNCHING) { 1884 if (tipc_link_is_synching(l)) { 1885 tnl = l; 1886 } else { 1887 tnl = pl; 1888 pl = l; 1889 } 1890 inputq_len = skb_queue_len(tipc_link_inputq(pl)); 1891 dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len; 1892 if (more(dlv_nxt, n->sync_point)) { 1893 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT); 1894 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT); 1895 return true; 1896 } 1897 if (l == pl) 1898 return true; 1899 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) 1900 return true; 1901 if (usr == LINK_PROTOCOL) 1902 return true; 1903 return false; 1904 } 1905 return true; 1906 } 1907 1908 /** 1909 * tipc_rcv - process TIPC packets/messages arriving from off-node 1910 
* @net: the applicable net namespace 1911 * @skb: TIPC packet 1912 * @bearer: pointer to bearer message arrived on 1913 * 1914 * Invoked with no locks held. Bearer pointer must point to a valid bearer 1915 * structure (i.e. cannot be NULL), but bearer can be inactive. 1916 */ 1917 void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) 1918 { 1919 struct sk_buff_head xmitq; 1920 struct tipc_node *n; 1921 struct tipc_msg *hdr; 1922 int bearer_id = b->identity; 1923 struct tipc_link_entry *le; 1924 u32 self = tipc_own_addr(net); 1925 int usr, rc = 0; 1926 u16 bc_ack; 1927 1928 __skb_queue_head_init(&xmitq); 1929 1930 /* Ensure message is well-formed before touching the header */ 1931 TIPC_SKB_CB(skb)->validated = false; 1932 if (unlikely(!tipc_msg_validate(&skb))) 1933 goto discard; 1934 hdr = buf_msg(skb); 1935 usr = msg_user(hdr); 1936 bc_ack = msg_bcast_ack(hdr); 1937 1938 /* Handle arrival of discovery or broadcast packet */ 1939 if (unlikely(msg_non_seq(hdr))) { 1940 if (unlikely(usr == LINK_CONFIG)) 1941 return tipc_disc_rcv(net, skb, b); 1942 else 1943 return tipc_node_bc_rcv(net, skb, bearer_id); 1944 } 1945 1946 /* Discard unicast link messages destined for another node */ 1947 if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self))) 1948 goto discard; 1949 1950 /* Locate neighboring node that sent packet */ 1951 n = tipc_node_find(net, msg_prevnode(hdr)); 1952 if (unlikely(!n)) 1953 goto discard; 1954 le = &n->links[bearer_id]; 1955 1956 /* Ensure broadcast reception is in synch with peer's send state */ 1957 if (unlikely(usr == LINK_PROTOCOL)) 1958 tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq); 1959 else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) 1960 tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr); 1961 1962 /* Receive packet directly if conditions permit */ 1963 tipc_node_read_lock(n); 1964 if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) { 1965 spin_lock_bh(&le->lock); 1966 if (le->link) { 
1967 rc = tipc_link_rcv(le->link, skb, &xmitq); 1968 skb = NULL; 1969 } 1970 spin_unlock_bh(&le->lock); 1971 } 1972 tipc_node_read_unlock(n); 1973 1974 /* Check/update node state before receiving */ 1975 if (unlikely(skb)) { 1976 if (unlikely(skb_linearize(skb))) 1977 goto discard; 1978 tipc_node_write_lock(n); 1979 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) { 1980 if (le->link) { 1981 rc = tipc_link_rcv(le->link, skb, &xmitq); 1982 skb = NULL; 1983 } 1984 } 1985 tipc_node_write_unlock(n); 1986 } 1987 1988 if (unlikely(rc & TIPC_LINK_UP_EVT)) 1989 tipc_node_link_up(n, bearer_id, &xmitq); 1990 1991 if (unlikely(rc & TIPC_LINK_DOWN_EVT)) 1992 tipc_node_link_down(n, bearer_id, false); 1993 1994 if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) 1995 tipc_named_rcv(net, &n->bc_entry.namedq); 1996 1997 if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) 1998 tipc_node_mcast_rcv(n); 1999 2000 if (!skb_queue_empty(&le->inputq)) 2001 tipc_sk_rcv(net, &le->inputq); 2002 2003 if (!skb_queue_empty(&xmitq)) 2004 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr); 2005 2006 tipc_node_put(n); 2007 discard: 2008 kfree_skb(skb); 2009 } 2010 2011 void tipc_node_apply_property(struct net *net, struct tipc_bearer *b, 2012 int prop) 2013 { 2014 struct tipc_net *tn = tipc_net(net); 2015 int bearer_id = b->identity; 2016 struct sk_buff_head xmitq; 2017 struct tipc_link_entry *e; 2018 struct tipc_node *n; 2019 2020 __skb_queue_head_init(&xmitq); 2021 2022 rcu_read_lock(); 2023 2024 list_for_each_entry_rcu(n, &tn->node_list, list) { 2025 tipc_node_write_lock(n); 2026 e = &n->links[bearer_id]; 2027 if (e->link) { 2028 if (prop == TIPC_NLA_PROP_TOL) 2029 tipc_link_set_tolerance(e->link, b->tolerance, 2030 &xmitq); 2031 else if (prop == TIPC_NLA_PROP_MTU) 2032 tipc_link_set_mtu(e->link, b->mtu); 2033 } 2034 tipc_node_write_unlock(n); 2035 tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr); 2036 } 2037 2038 rcu_read_unlock(); 2039 } 2040 2041 int tipc_nl_peer_rm(struct 
 sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
	struct tipc_node *peer;
	u32 addr;
	int err;

	/* We identify the peer by its net */
	if (!info->attrs[TIPC_NLA_NET])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
					  info->attrs[TIPC_NLA_NET],
					  tipc_nl_net_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_NET_ADDR])
		return -EINVAL;

	addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);

	/* Removing ourselves is not supported */
	if (in_own_node(net, addr))
		return -ENOTSUPP;

	spin_lock_bh(&tn->node_list_lock);
	peer = tipc_node_find(net, addr);
	if (!peer) {
		spin_unlock_bh(&tn->node_list_lock);
		return -ENXIO;
	}

	tipc_node_write_lock(peer);
	/* Only a node that is fully down may be removed */
	if (peer->state != SELF_DOWN_PEER_DOWN &&
	    peer->state != SELF_DOWN_PEER_LEAVING) {
		tipc_node_write_unlock(peer);
		err = -EBUSY;
		goto err_out;
	}

	tipc_node_clear_links(peer);
	tipc_node_write_unlock(peer);
	tipc_node_delete(peer);

	err = 0;
err_out:
	tipc_node_put(peer);
	spin_unlock_bh(&tn->node_list_lock);

	return err;
}

/* tipc_nl_node_dump - netlink dump handler: emit one entry per known node,
 * resuming after the address saved in cb->args[1] on continuation calls
 */
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler. Resulting in the NLMSG_DONE message having
			 * the NLM_F_DUMP_INTR flag set if the node state
			 * changed while we released the lock.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		/* Skip forward until the resume point is reached */
		if (last_addr) {
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_read_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			/* Message full: remember where to resume */
			last_addr = node->addr;
			tipc_node_read_unlock(node);
			goto out;
		}

		tipc_node_read_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}

/* tipc_node_find_by_name - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or 0 if no matching link is found.
 */
static struct tipc_node *tipc_node_find_by_name(struct net *net,
						const char *link_name,
						unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l;
	struct tipc_node *n;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_read_lock(n);
		for (i = 0; i < MAX_BEARERS; i++) {
			l = n->links[i].link;
			if (l && !strcmp(tipc_link_name(l), link_name)) {
				*bearer_id = i;
				found_node = n;
				break;
			}
		}
		tipc_node_read_unlock(n);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}

/* tipc_nl_node_set_link - netlink handler: update tolerance, priority
 * and/or window properties of the link named in the request
 */
int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct sk_buff_head xmitq;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	__skb_queue_head_init(&xmitq);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	/* The broadcast link is handled by the bcast module */
	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_node_find_by_name(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_read_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			tipc_link_set_tolerance(link, tol, &xmitq);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			tipc_link_set_prio(link, prio, &xmitq);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_read_unlock(node);
	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr);
	return res;
}

/* tipc_nl_node_get_link - netlink handler: fetch attributes of one link
 * (continues past the end of this chunk)
 */
int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto err_free;
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_node_find_by_name(net, name, &bearer_id);
		if (!node) {
			err = -EINVAL;
			goto err_free;
		}

		tipc_node_read_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_read_unlock(node);
			err = -EINVAL;
			goto err_free;
2324 } 2325 2326 err = __tipc_nl_add_link(net, &msg, link, 0); 2327 tipc_node_read_unlock(node); 2328 if (err) 2329 goto err_free; 2330 } 2331 2332 return genlmsg_reply(msg.skb, info); 2333 2334 err_free: 2335 nlmsg_free(msg.skb); 2336 return err; 2337 } 2338 2339 int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info) 2340 { 2341 int err; 2342 char *link_name; 2343 unsigned int bearer_id; 2344 struct tipc_link *link; 2345 struct tipc_node *node; 2346 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; 2347 struct net *net = sock_net(skb->sk); 2348 struct tipc_link_entry *le; 2349 2350 if (!info->attrs[TIPC_NLA_LINK]) 2351 return -EINVAL; 2352 2353 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX, 2354 info->attrs[TIPC_NLA_LINK], 2355 tipc_nl_link_policy, info->extack); 2356 if (err) 2357 return err; 2358 2359 if (!attrs[TIPC_NLA_LINK_NAME]) 2360 return -EINVAL; 2361 2362 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]); 2363 2364 if (strcmp(link_name, tipc_bclink_name) == 0) { 2365 err = tipc_bclink_reset_stats(net); 2366 if (err) 2367 return err; 2368 return 0; 2369 } 2370 2371 node = tipc_node_find_by_name(net, link_name, &bearer_id); 2372 if (!node) 2373 return -EINVAL; 2374 2375 le = &node->links[bearer_id]; 2376 tipc_node_read_lock(node); 2377 spin_lock_bh(&le->lock); 2378 link = node->links[bearer_id].link; 2379 if (!link) { 2380 spin_unlock_bh(&le->lock); 2381 tipc_node_read_unlock(node); 2382 return -EINVAL; 2383 } 2384 tipc_link_reset_stats(link); 2385 spin_unlock_bh(&le->lock); 2386 tipc_node_read_unlock(node); 2387 return 0; 2388 } 2389 2390 /* Caller should hold node lock */ 2391 static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg, 2392 struct tipc_node *node, u32 *prev_link) 2393 { 2394 u32 i; 2395 int err; 2396 2397 for (i = *prev_link; i < MAX_BEARERS; i++) { 2398 *prev_link = i; 2399 2400 if (!node->links[i].link) 2401 continue; 2402 2403 err = __tipc_nl_add_link(net, msg, 2404 node->links[i].link, 
NLM_F_MULTI); 2405 if (err) 2406 return err; 2407 } 2408 *prev_link = 0; 2409 2410 return 0; 2411 } 2412 2413 int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb) 2414 { 2415 struct net *net = sock_net(skb->sk); 2416 struct tipc_net *tn = net_generic(net, tipc_net_id); 2417 struct tipc_node *node; 2418 struct tipc_nl_msg msg; 2419 u32 prev_node = cb->args[0]; 2420 u32 prev_link = cb->args[1]; 2421 int done = cb->args[2]; 2422 int err; 2423 2424 if (done) 2425 return 0; 2426 2427 msg.skb = skb; 2428 msg.portid = NETLINK_CB(cb->skb).portid; 2429 msg.seq = cb->nlh->nlmsg_seq; 2430 2431 rcu_read_lock(); 2432 if (prev_node) { 2433 node = tipc_node_find(net, prev_node); 2434 if (!node) { 2435 /* We never set seq or call nl_dump_check_consistent() 2436 * this means that setting prev_seq here will cause the 2437 * consistence check to fail in the netlink callback 2438 * handler. Resulting in the last NLMSG_DONE message 2439 * having the NLM_F_DUMP_INTR flag set. 2440 */ 2441 cb->prev_seq = 1; 2442 goto out; 2443 } 2444 tipc_node_put(node); 2445 2446 list_for_each_entry_continue_rcu(node, &tn->node_list, 2447 list) { 2448 tipc_node_read_lock(node); 2449 err = __tipc_nl_add_node_links(net, &msg, node, 2450 &prev_link); 2451 tipc_node_read_unlock(node); 2452 if (err) 2453 goto out; 2454 2455 prev_node = node->addr; 2456 } 2457 } else { 2458 err = tipc_nl_add_bc_link(net, &msg); 2459 if (err) 2460 goto out; 2461 2462 list_for_each_entry_rcu(node, &tn->node_list, list) { 2463 tipc_node_read_lock(node); 2464 err = __tipc_nl_add_node_links(net, &msg, node, 2465 &prev_link); 2466 tipc_node_read_unlock(node); 2467 if (err) 2468 goto out; 2469 2470 prev_node = node->addr; 2471 } 2472 } 2473 done = 1; 2474 out: 2475 rcu_read_unlock(); 2476 2477 cb->args[0] = prev_node; 2478 cb->args[1] = prev_link; 2479 cb->args[2] = done; 2480 2481 return skb->len; 2482 } 2483 2484 int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info) 2485 { 2486 struct 
nlattr *attrs[TIPC_NLA_MON_MAX + 1]; 2487 struct net *net = sock_net(skb->sk); 2488 int err; 2489 2490 if (!info->attrs[TIPC_NLA_MON]) 2491 return -EINVAL; 2492 2493 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX, 2494 info->attrs[TIPC_NLA_MON], 2495 tipc_nl_monitor_policy, 2496 info->extack); 2497 if (err) 2498 return err; 2499 2500 if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) { 2501 u32 val; 2502 2503 val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]); 2504 err = tipc_nl_monitor_set_threshold(net, val); 2505 if (err) 2506 return err; 2507 } 2508 2509 return 0; 2510 } 2511 2512 static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg) 2513 { 2514 struct nlattr *attrs; 2515 void *hdr; 2516 u32 val; 2517 2518 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 2519 0, TIPC_NL_MON_GET); 2520 if (!hdr) 2521 return -EMSGSIZE; 2522 2523 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON); 2524 if (!attrs) 2525 goto msg_full; 2526 2527 val = tipc_nl_monitor_get_threshold(net); 2528 2529 if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val)) 2530 goto attr_msg_full; 2531 2532 nla_nest_end(msg->skb, attrs); 2533 genlmsg_end(msg->skb, hdr); 2534 2535 return 0; 2536 2537 attr_msg_full: 2538 nla_nest_cancel(msg->skb, attrs); 2539 msg_full: 2540 genlmsg_cancel(msg->skb, hdr); 2541 2542 return -EMSGSIZE; 2543 } 2544 2545 int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info) 2546 { 2547 struct net *net = sock_net(skb->sk); 2548 struct tipc_nl_msg msg; 2549 int err; 2550 2551 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 2552 if (!msg.skb) 2553 return -ENOMEM; 2554 msg.portid = info->snd_portid; 2555 msg.seq = info->snd_seq; 2556 2557 err = __tipc_nl_add_monitor_prop(net, &msg); 2558 if (err) { 2559 nlmsg_free(msg.skb); 2560 return err; 2561 } 2562 2563 return genlmsg_reply(msg.skb, info); 2564 } 2565 2566 int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback 
*cb) 2567 { 2568 struct net *net = sock_net(skb->sk); 2569 u32 prev_bearer = cb->args[0]; 2570 struct tipc_nl_msg msg; 2571 int bearer_id; 2572 int err; 2573 2574 if (prev_bearer == MAX_BEARERS) 2575 return 0; 2576 2577 msg.skb = skb; 2578 msg.portid = NETLINK_CB(cb->skb).portid; 2579 msg.seq = cb->nlh->nlmsg_seq; 2580 2581 rtnl_lock(); 2582 for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) { 2583 err = __tipc_nl_add_monitor(net, &msg, bearer_id); 2584 if (err) 2585 break; 2586 } 2587 rtnl_unlock(); 2588 cb->args[0] = bearer_id; 2589 2590 return skb->len; 2591 } 2592 2593 int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb, 2594 struct netlink_callback *cb) 2595 { 2596 struct net *net = sock_net(skb->sk); 2597 u32 prev_node = cb->args[1]; 2598 u32 bearer_id = cb->args[2]; 2599 int done = cb->args[0]; 2600 struct tipc_nl_msg msg; 2601 int err; 2602 2603 if (!prev_node) { 2604 struct nlattr **attrs = genl_dumpit_info(cb)->attrs; 2605 struct nlattr *mon[TIPC_NLA_MON_MAX + 1]; 2606 2607 if (!attrs[TIPC_NLA_MON]) 2608 return -EINVAL; 2609 2610 err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX, 2611 attrs[TIPC_NLA_MON], 2612 tipc_nl_monitor_policy, 2613 NULL); 2614 if (err) 2615 return err; 2616 2617 if (!mon[TIPC_NLA_MON_REF]) 2618 return -EINVAL; 2619 2620 bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]); 2621 2622 if (bearer_id >= MAX_BEARERS) 2623 return -EINVAL; 2624 } 2625 2626 if (done) 2627 return 0; 2628 2629 msg.skb = skb; 2630 msg.portid = NETLINK_CB(cb->skb).portid; 2631 msg.seq = cb->nlh->nlmsg_seq; 2632 2633 rtnl_lock(); 2634 err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node); 2635 if (!err) 2636 done = 1; 2637 2638 rtnl_unlock(); 2639 cb->args[0] = done; 2640 cb->args[1] = prev_node; 2641 cb->args[2] = bearer_id; 2642 2643 return skb->len; 2644 } 2645 2646 u32 tipc_node_get_addr(struct tipc_node *node) 2647 { 2648 return (node) ? 
node->addr : 0; 2649 } 2650 2651 /** 2652 * tipc_node_dump - dump TIPC node data 2653 * @n: tipc node to be dumped 2654 * @more: dump more? 2655 * - false: dump only tipc node data 2656 * - true: dump node link data as well 2657 * @buf: returned buffer of dump data in format 2658 */ 2659 int tipc_node_dump(struct tipc_node *n, bool more, char *buf) 2660 { 2661 int i = 0; 2662 size_t sz = (more) ? NODE_LMAX : NODE_LMIN; 2663 2664 if (!n) { 2665 i += scnprintf(buf, sz, "node data: (null)\n"); 2666 return i; 2667 } 2668 2669 i += scnprintf(buf, sz, "node data: %x", n->addr); 2670 i += scnprintf(buf + i, sz - i, " %x", n->state); 2671 i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]); 2672 i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]); 2673 i += scnprintf(buf + i, sz - i, " %x", n->action_flags); 2674 i += scnprintf(buf + i, sz - i, " %u", n->failover_sent); 2675 i += scnprintf(buf + i, sz - i, " %u", n->sync_point); 2676 i += scnprintf(buf + i, sz - i, " %d", n->link_cnt); 2677 i += scnprintf(buf + i, sz - i, " %u", n->working_links); 2678 i += scnprintf(buf + i, sz - i, " %x", n->capabilities); 2679 i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv); 2680 2681 if (!more) 2682 return i; 2683 2684 i += scnprintf(buf + i, sz - i, "link_entry[0]:\n"); 2685 i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu); 2686 i += scnprintf(buf + i, sz - i, " media: "); 2687 i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr); 2688 i += scnprintf(buf + i, sz - i, "\n"); 2689 i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i); 2690 i += scnprintf(buf + i, sz - i, " inputq: "); 2691 i += tipc_list_dump(&n->links[0].inputq, false, buf + i); 2692 2693 i += scnprintf(buf + i, sz - i, "link_entry[1]:\n"); 2694 i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu); 2695 i += scnprintf(buf + i, sz - i, " media: "); 2696 i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr); 2697 i += scnprintf(buf + i, sz 
- i, "\n"); 2698 i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i); 2699 i += scnprintf(buf + i, sz - i, " inputq: "); 2700 i += tipc_list_dump(&n->links[1].inputq, false, buf + i); 2701 2702 i += scnprintf(buf + i, sz - i, "bclink:\n "); 2703 i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i); 2704 2705 return i; 2706 } 2707 2708 void tipc_node_pre_cleanup_net(struct net *exit_net) 2709 { 2710 struct tipc_node *n; 2711 struct tipc_net *tn; 2712 struct net *tmp; 2713 2714 rcu_read_lock(); 2715 for_each_net_rcu(tmp) { 2716 if (tmp == exit_net) 2717 continue; 2718 tn = tipc_net(tmp); 2719 if (!tn) 2720 continue; 2721 spin_lock_bh(&tn->node_list_lock); 2722 list_for_each_entry_rcu(n, &tn->node_list, list) { 2723 if (!n->peer_net) 2724 continue; 2725 if (n->peer_net != exit_net) 2726 continue; 2727 tipc_node_write_lock(n); 2728 n->peer_net = NULL; 2729 n->peer_hash_mix = 0; 2730 tipc_node_write_unlock_fast(n); 2731 break; 2732 } 2733 spin_unlock_bh(&tn->node_list_lock); 2734 } 2735 rcu_read_unlock(); 2736 } 2737