/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "monitor.h"
#include "discover.h"
#include "netlink.h"
#include "trace.h"
#include "crypto.h"

#define INVALID_NODE_SIG	0x10000
#define NODE_CLEANUP_AFTER	300000

/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_NOTIFY_LINK_UP: notify link is up
 * TIPC_NOTIFY_LINK_DOWN: notify link is down
 */
enum {
	TIPC_NOTIFY_NODE_DOWN = (1 << 3),
	TIPC_NOTIFY_NODE_UP = (1 << 4),
	TIPC_NOTIFY_LINK_UP = (1 << 6),
	TIPC_NOTIFY_LINK_DOWN = (1 << 7)
};

struct tipc_link_entry {
	struct tipc_link *link;
	spinlock_t lock; /* per link */
	u32 mtu;
	struct sk_buff_head inputq;
	struct tipc_media_addr maddr;
};

struct tipc_bclink_entry {
	struct tipc_link *link;
	struct sk_buff_head inputq1;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq2;
	struct sk_buff_head namedq;
	u16 named_rcv_nxt;
	bool named_open;
};

/**
 * struct tipc_node - TIPC node structure
 * @addr: network address of node
 * @kref: reference counter to node object
 * @lock: rwlock governing access to structure
 * @net: the applicable net namespace
 * @hash: links to adjacent nodes in unsorted hash chain
 * @active_links: bearer ids of active links, used as index into links[] array
 * @links: array containing references to all links to node
 * @bc_entry: broadcast link entry
 * @action_flags: bit mask of different types of node actions
 * @state: connectivity state vs peer node
 * @preliminary: a preliminary node or not
 * @failover_sent: failover sent or not
 * @sync_point: sequence number where synch/failover is finished
 * @list: links to adjacent nodes in sorted list of cluster's nodes
 * @working_links: number of working links to node (both active and standby)
 * @link_cnt: number of links to node
 * @capabilities: bitmap, indicating peer node's functional capabilities
 * @signature: node instance identifier
 * @link_id: local and remote bearer ids of changing link, if any
 * @peer_id: 128-bit ID of peer
 * @peer_id_string: ID string of peer
 * @publ_list: list of publications
 * @conn_sks: list of connections (FIXME)
 * @timer: node's keepalive timer
 * @keepalive_intv: keepalive interval in milliseconds
 * @rcu: rcu struct for tipc_node
 * @delete_at: indicates the time for deleting a down node
 * @peer_net: peer's net namespace
 * @peer_hash_mix: hash for this peer (FIXME)
 * @crypto_rx: RX crypto handler
 */
struct tipc_node {
	u32 addr;
	struct kref kref;
	rwlock_t lock;
	struct net *net;
	struct hlist_node hash;
	int active_links[2];
	struct tipc_link_entry links[MAX_BEARERS];
	struct tipc_bclink_entry bc_entry;
	int action_flags;
	struct list_head list;
	int state;
	bool preliminary;
	bool failover_sent;
	u16 sync_point;
	int link_cnt;
	u16 working_links;
	u16 capabilities;
	u32 signature;
	u32 link_id;
	u8 peer_id[16];
	char peer_id_string[NODE_ID_STR_LEN];
	struct list_head publ_list;
	struct list_head conn_sks;
	unsigned long keepalive_intv;
	struct timer_list timer;
	struct rcu_head rcu;
	unsigned long delete_at;
	struct net *peer_net;
	u32 peer_hash_mix;
#ifdef CONFIG_TIPC_CRYPTO
	struct tipc_crypto *crypto_rx;
#endif
};
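/* Locking overview (informal sketch; see the lock helpers further down):
 * n->lock is taken in read mode together with the owning link entry's
 * spinlock whenever a link is used, and in write mode whenever node or
 * link state is changed. A typical reader-side sequence looks like:
 *
 *	tipc_node_read_lock(n);
 *	le = &n->links[bearer_id];
 *	spin_lock_bh(&le->lock);
 *	... transmit on or time out le->link ...
 *	spin_unlock_bh(&le->lock);
 *	tipc_node_read_unlock(n);
 */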
/* Node FSM states and events:
 */
enum {
	SELF_DOWN_PEER_DOWN = 0xdd,
	SELF_UP_PEER_UP = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING = 0xac,
	SELF_COMING_PEER_UP = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER = 0xf0,
	NODE_SYNCHING = 0xcc
};

enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT = 0xfee,
	NODE_SYNCH_BEGIN_EVT = 0xcbe,
	NODE_SYNCH_END_EVT = 0xcee
};

static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr);
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
				bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
static bool node_is_up(struct tipc_node *n);
static void tipc_node_delete_from_list(struct tipc_node *node);

struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};

static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
	int bearer_id = n->active_links[sel & 1];

	if (unlikely(bearer_id == INVALID_BEARER_ID))
		return NULL;

	return n->links[bearer_id].link;
}

int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
{
	struct tipc_node *n;
	int bearer_id;
	unsigned int mtu = MAX_MSG_SIZE;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return mtu;

	/* Allow MAX_MSG_SIZE when building connection oriented message
	 * if they are in the same core network
	 */
	if (n->peer_net && connected) {
		tipc_node_put(n);
		return mtu;
	}

	bearer_id = n->active_links[sel & 1];
	if (likely(bearer_id != INVALID_BEARER_ID))
		mtu = n->links[bearer_id].mtu;
	tipc_node_put(n);
	return mtu;
}

bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
{
	u8 *own_id = tipc_own_id(net);
	struct tipc_node *n;

	if (!own_id)
		return true;

	if (addr == tipc_own_addr(net)) {
		memcpy(id, own_id, TIPC_NODEID_LEN);
		return true;
	}
	n = tipc_node_find(net, addr);
	if (!n)
		return false;

	memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
	tipc_node_put(n);
	return true;
}

u16 tipc_node_get_capabilities(struct net *net, u32 addr)
{
	struct tipc_node *n;
	u16 caps;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return TIPC_NODE_CAPABILITIES;
	caps = n->capabilities;
	tipc_node_put(n);
	return caps;
}

u32 tipc_node_get_addr(struct tipc_node *node)
{
	return (node) ? node->addr : 0;
}

char *tipc_node_get_id_str(struct tipc_node *node)
{
	return node->peer_id_string;
}

#ifdef CONFIG_TIPC_CRYPTO
/**
 * tipc_node_crypto_rx - Retrieve crypto RX handle from node
 * @__n: target tipc_node
 * Note: node ref counter must be held first!
 */
struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
{
	return (__n) ? __n->crypto_rx : NULL;
}

struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
{
	return container_of(pos, struct tipc_node, list)->crypto_rx;
}

struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	n = tipc_node_find(net, addr);
	return (n) ? n->crypto_rx : NULL;
}
#endif

static void tipc_node_free(struct rcu_head *rp)
{
	struct tipc_node *n = container_of(rp, struct tipc_node, rcu);

#ifdef CONFIG_TIPC_CRYPTO
	tipc_crypto_stop(&n->crypto_rx);
#endif
	kfree(n);
}

static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *n = container_of(kref, struct tipc_node, kref);

	kfree(n->bc_entry.link);
	call_rcu(&n->rcu, tipc_node_free);
}

void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node;
	unsigned int thash = tipc_hashfn(addr);

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
		if (node->addr != addr || node->preliminary)
			continue;
		if (!kref_get_unless_zero(&node->kref))
			node = NULL;
		break;
	}
	rcu_read_unlock();
	return node;
}
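/* A successful tipc_node_find() returns with the node's reference count
 * elevated (via kref_get_unless_zero() above), so every lookup must be
 * paired with tipc_node_put(). A minimal caller-side sketch (illustrative):
 *
 *	struct tipc_node *n = tipc_node_find(net, addr);
 *
 *	if (n) {
 *		... read node fields under the appropriate node lock ...
 *		tipc_node_put(n);
 *	}
 */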
/* tipc_node_find_by_id - locate specified node object by its 128-bit id
 * Note: this function is called only when a discovery request failed
 * to find the node by its 32-bit id, and is not time critical
 */
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		read_lock_bh(&n->lock);
		if (!memcmp(id, n->peer_id, 16) &&
		    kref_get_unless_zero(&n->kref))
			found = true;
		read_unlock_bh(&n->lock);
		if (found)
			break;
	}
	rcu_read_unlock();
	return found ? n : NULL;
}

static void tipc_node_read_lock(struct tipc_node *n)
	__acquires(n->lock)
{
	read_lock_bh(&n->lock);
}

static void tipc_node_read_unlock(struct tipc_node *n)
	__releases(n->lock)
{
	read_unlock_bh(&n->lock);
}

static void tipc_node_write_lock(struct tipc_node *n)
	__acquires(n->lock)
{
	write_lock_bh(&n->lock);
}

static void tipc_node_write_unlock_fast(struct tipc_node *n)
	__releases(n->lock)
{
	write_unlock_bh(&n->lock);
}

static void tipc_node_write_unlock(struct tipc_node *n)
	__releases(n->lock)
{
	struct tipc_socket_addr sk;
	struct net *net = n->net;
	u32 flags = n->action_flags;
	struct list_head *publ_list;
	struct tipc_uaddr ua;
	u32 bearer_id, node;

	if (likely(!flags)) {
		write_unlock_bh(&n->lock);
		return;
	}

	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
		   TIPC_LINK_STATE, n->addr, n->addr);
	sk.ref = n->link_id;
	sk.node = tipc_own_addr(net);
	node = n->addr;
	bearer_id = n->link_id & 0xffff;
	publ_list = &n->publ_list;

	n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
			     TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

	write_unlock_bh(&n->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, node, n->capabilities);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, node, n->capabilities);

	if (flags & TIPC_NOTIFY_LINK_UP) {
		tipc_mon_peer_up(net, node, bearer_id);
		tipc_nametbl_publish(net, &ua, &sk, sk.ref);
	}
	if (flags & TIPC_NOTIFY_LINK_DOWN) {
		tipc_mon_peer_down(net, node, bearer_id);
		tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
	}
}

static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
{
	int net_id = tipc_netid(n->net);
	struct tipc_net *tn_peer;
	struct net *tmp;
	u32 hash_chk;

	if (n->peer_net)
		return;

	for_each_net_rcu(tmp) {
		tn_peer = tipc_net(tmp);
		if (!tn_peer)
			continue;
		/* Check whether the peer node really lives in this namespace */
		if (tn_peer->net_id != net_id)
			continue;
		if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
			continue;
		hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
		if (hash_mixes ^ hash_chk)
			continue;
		n->peer_net = tmp;
		n->peer_hash_mix = hash_mixes;
		break;
	}
}

struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
				   u16 capabilities, u32 hash_mixes,
				   bool preliminary)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l, *snd_l = tipc_bc_sndlink(net);
	struct tipc_node *n, *temp_node;
	unsigned long intv;
	int bearer_id;
	int i;

	spin_lock_bh(&tn->node_list_lock);
	n = tipc_node_find(net, addr) ?:
		tipc_node_find_by_id(net, peer_id);
	if (n) {
		if (!n->preliminary)
			goto update;
		if (preliminary)
			goto exit;
		/* A preliminary node becomes "real" now, refresh its data */
		tipc_node_write_lock(n);
		if (!tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
					 tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
					 n->capabilities, &n->bc_entry.inputq1,
					 &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
			pr_warn("Broadcast rcv link refresh failed, no memory\n");
			tipc_node_write_unlock_fast(n);
			tipc_node_put(n);
			n = NULL;
			goto exit;
		}
		n->preliminary = false;
		n->addr = addr;
		hlist_del_rcu(&n->hash);
		hlist_add_head_rcu(&n->hash,
				   &tn->node_htable[tipc_hashfn(addr)]);
		list_del_rcu(&n->list);
		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
			if (n->addr < temp_node->addr)
				break;
		}
		list_add_tail_rcu(&n->list, &temp_node->list);
		tipc_node_write_unlock_fast(n);

update:
		if (n->peer_hash_mix ^ hash_mixes)
			tipc_node_assign_peer_net(n, hash_mixes);
		if (n->capabilities == capabilities)
			goto exit;
		/* Same node may come back with new capabilities */
		tipc_node_write_lock(n);
		n->capabilities = capabilities;
		for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
			l = n->links[bearer_id].link;
			if (l)
				tipc_link_update_caps(l, capabilities);
		}
		tipc_node_write_unlock_fast(n);

		/* Calculate cluster capabilities */
		tn->capabilities = TIPC_NODE_CAPABILITIES;
		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
			tn->capabilities &= temp_node->capabilities;
		}

		tipc_bcast_toggle_rcast(net,
					(tn->capabilities & TIPC_BCAST_RCAST));

		goto exit;
	}
	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	tipc_nodeid2string(n->peer_id_string, peer_id);
#ifdef CONFIG_TIPC_CRYPTO
	if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
		pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
		kfree(n);
		n = NULL;
		goto exit;
	}
#endif
	n->addr = addr;
	n->preliminary = preliminary;
	memcpy(&n->peer_id, peer_id, 16);
	n->net = net;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;
	/* Assign the kernel-local peer namespace, if one exists */
	tipc_node_assign_peer_net(n, hash_mixes);
	n->capabilities = capabilities;
	kref_init(&n->kref);
	rwlock_init(&n->lock);
	INIT_HLIST_NODE(&n->hash);
	INIT_LIST_HEAD(&n->list);
	INIT_LIST_HEAD(&n->publ_list);
	INIT_LIST_HEAD(&n->conn_sks);
	skb_queue_head_init(&n->bc_entry.namedq);
	skb_queue_head_init(&n->bc_entry.inputq1);
	__skb_queue_head_init(&n->bc_entry.arrvq);
	skb_queue_head_init(&n->bc_entry.inputq2);
	for (i = 0; i < MAX_BEARERS; i++)
		spin_lock_init(&n->links[i].lock);
	n->state = SELF_DOWN_PEER_LEAVING;
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	n->signature = INVALID_NODE_SIG;
	n->active_links[0] = INVALID_BEARER_ID;
	n->active_links[1] = INVALID_BEARER_ID;
	if (!preliminary &&
	    !tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
				 tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
				 n->capabilities, &n->bc_entry.inputq1,
				 &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
		pr_warn("Broadcast rcv link creation failed, no memory\n");
		tipc_node_put(n);
		n = NULL;
		goto exit;
	}
	tipc_node_get(n);
	timer_setup(&n->timer, tipc_node_timeout, 0);
	/* Start a slow timer anyway, crypto needs it */
	n->keepalive_intv = 10000;
	intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
	if (!mod_timer(&n->timer, intv))
		tipc_node_get(n);
	hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n->list, &temp_node->list);
	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
	trace_tipc_node_create(n, true, " ");
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n;
}

static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = tipc_link_tolerance(l);
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	/* Link with lowest tolerance determines timer interval */
	if (intv < n->keepalive_intv)
		n->keepalive_intv = intv;

	/* Ensure link's abort limit corresponds to current tolerance */
	tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}
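/* Worked example for the interval/abort-limit relation above (numbers are
 * illustrative only): with a link tolerance of 1500 ms, tol / 4 = 375 ms,
 * which is below the 500 ms cap, so keepalive_intv becomes 375 ms and the
 * abort limit is 1500 / 375 = 4, i.e. the link is declared failed after
 * four silent keepalive periods. Starting from the default 10000 ms value,
 * any configured tolerance shrinks the interval on the first call.
 */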
static void tipc_node_delete_from_list(struct tipc_node *node)
{
#ifdef CONFIG_TIPC_CRYPTO
	tipc_crypto_key_flush(node->crypto_rx);
#endif
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	tipc_node_put(node);
}

static void tipc_node_delete(struct tipc_node *node)
{
	trace_tipc_node_delete(node, true, " ");
	tipc_node_delete_from_list(node);

	del_timer_sync(&node->timer);
	tipc_node_put(node);
}

void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
		tipc_node_delete(node);
	spin_unlock_bh(&tn->node_list_lock);
}

void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_add_tail(subscr, &n->publ_list);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_del_init(subscr);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_write_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_write_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_write_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_write_unlock(node);
	tipc_node_put(node);
}

static void tipc_node_clear_links(struct tipc_node *node)
{
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link_entry *le = &node->links[i];

		if (le->link) {
			kfree(le->link);
			le->link = NULL;
			node->link_cnt--;
		}
	}
}

/* tipc_node_cleanup - delete nodes that have had no
 * active links for NODE_CLEANUP_AFTER time
 */
static bool tipc_node_cleanup(struct tipc_node *peer)
{
	struct tipc_node *temp_node;
	struct tipc_net *tn = tipc_net(peer->net);
	bool deleted = false;

	/* If lock held by tipc_node_stop() the node will be deleted anyway */
	if (!spin_trylock_bh(&tn->node_list_lock))
		return false;

	tipc_node_write_lock(peer);

	if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
		tipc_node_clear_links(peer);
		tipc_node_delete_from_list(peer);
		deleted = true;
	}
	tipc_node_write_unlock(peer);

	if (!deleted) {
		spin_unlock_bh(&tn->node_list_lock);
		return deleted;
	}

	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(peer->net,
				(tn->capabilities & TIPC_BCAST_RCAST));
	spin_unlock_bh(&tn->node_list_lock);
	return deleted;
}

/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(struct timer_list *t)
{
	struct tipc_node *n = from_timer(n, t, timer);
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int remains = n->link_cnt;
	int bearer_id;
	int rc = 0;

	trace_tipc_node_timeout(n, false, " ");
	if (!node_is_up(n) && tipc_node_cleanup(n)) {
		/* Drop the timer's reference to the node */
		tipc_node_put(n);
		return;
	}

#ifdef CONFIG_TIPC_CRYPTO
	/* Take any crypto key related actions first */
	tipc_crypto_timeout(n->crypto_rx);
#endif
	__skb_queue_head_init(&xmitq);

	/* Reset the node interval to a large initial value (10 seconds);
	 * it is then recalculated below from the lowest link tolerance
	 */
	tipc_node_read_lock(n);
	n->keepalive_intv = 10000;
	tipc_node_read_unlock(n);
	for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		if (le->link) {
			spin_lock_bh(&le->lock);
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
			spin_unlock_bh(&le->lock);
			remains--;
		}
		tipc_node_read_unlock(n);
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}

/**
 * __tipc_node_link_up - handle addition of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer
 * @xmitq: queue for messages to be xmited on
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
				struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	if (!nl || tipc_link_is_up(nl))
		return;

	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
	if (!tipc_link_is_up(nl))
		return;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = tipc_link_id(nl);

	/* Leave room for tunnel header when returning 'mtu' to users: */
	n->links[bearer_id].mtu = tipc_link_mss(nl);

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

	pr_debug("Established link <%s> on network plane %c\n",
		 tipc_link_name(nl), tipc_link_plane(nl));
	trace_tipc_node_link_up(n, true, " ");

	/* Ensure that a STATE message goes first */
	tipc_link_build_state_msg(nl, xmitq);

	/* First link? => give it both slots */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
		n->action_flags |= TIPC_NOTIFY_NODE_UP;
		tipc_link_set_active(nl, true);
		tipc_bcast_add_peer(n->net, nl, xmitq);
		return;
	}

	/* Second link => redistribute slots */
	if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
		pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_link_set_active(nl, true);
		tipc_link_set_active(ol, false);
	} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
		tipc_link_set_active(nl, true);
		*slot1 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
	}

	/* Prepare synchronization with first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}

/**
 * tipc_node_link_up - handle addition of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer
 * @xmitq: queue for messages to be xmited on
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	struct tipc_media_addr *maddr;

	tipc_node_write_lock(n);
	__tipc_node_link_up(n, bearer_id, xmitq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
	tipc_node_write_unlock(n);
}
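/* Illustration of the slot redistribution above (example priorities only):
 * active_links[0]/[1] hold the bearer ids used for even/odd selectors.
 *
 *	first link A (prio 10) up          -> slots [A, A]
 *	second link B (prio 20 > 10) up    -> slots [B, B], A becomes standby
 *	second link B (prio 10 == 10) up   -> slots [A, B], load sharing
 *	second link B (prio 5 < 10) up     -> slots [A, A], B is standby
 */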
/**
 * tipc_node_link_failover() - start failover in case "half-failover"
 *
 * This function is only called in a very special situation where link
 * failover can be already started on peer node but not on this node.
 * This can happen when e.g.::
 *
 *	1. Both links <1A-2A>, <1B-2B> down
 *	2. Link endpoint 2A up, but 1A still down (e.g. due to network
 *	   disturbance, wrong session, etc.)
 *	3. Link <1B-2B> up
 *	4. Link endpoint 2A down (e.g. due to link tolerance timeout)
 *	5. Node 2 starts failover onto link <1B-2B>
 *
 * ==> Node 1 never starts link/node failover!
 *
 * @n: tipc node structure
 * @l: link peer endpoint failing over (- can be NULL)
 * @tnl: tunnel link
 * @xmitq: queue for messages to be xmited on tnl link later
 */
static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
				    struct tipc_link *tnl,
				    struct sk_buff_head *xmitq)
{
	/* Avoid a "self-failover" that can never end */
	if (!tipc_link_is_up(tnl))
		return;

	/* Don't rush, the failed link may still be in the process of resetting */
	if (l && !tipc_link_is_reset(l))
		return;

	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);

	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_failover_prepare(l, tnl, xmitq);

	if (l)
		tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
}

/**
 * __tipc_node_link_down - handle loss of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer (in/out; may be redirected to the remaining link)
 * @xmitq: queue for messages to be xmited on
 * @maddr: output media address of the bearer
 */
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr)
{
	struct tipc_link_entry *le = &n->links[*bearer_id];
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0, prio;
	struct tipc_link *l, *_l, *tnl;

	l = n->links[*bearer_id].link;
	if (!l || tipc_link_is_reset(l))
		return;

	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = tipc_link_id(l);

	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 tipc_link_name(l), tipc_link_plane(l));

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		prio = tipc_link_prio(_l);
		if (prio < highest)
			continue;
		if (prio > highest) {
			highest = prio;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	if (!node_is_up(n)) {
		if (tipc_link_peer_is_down(l))
			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		tipc_link_reset(l);
		tipc_link_build_reset_msg(l, xmitq);
		*maddr = &n->links[*bearer_id].maddr;
		node_lost_contact(n, &le->inputq);
		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
		return;
	}
	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);

	/* There is still a working link => initiate failover */
	*bearer_id = n->active_links[0];
	tnl = n->links[*bearer_id].link;
	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
	tipc_link_reset(l);
	tipc_link_fsm_evt(l, LINK_RESET_EVT);
	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
	*maddr = &n->links[*bearer_id].maddr;
}
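/* Note on the sync_point arithmetic above (illustrative): sequence numbers
 * are u16 and wrap modulo 2^16, and less()/more() compare with wraparound
 * in mind. Setting sync_point to rcv_nxt + (U16_MAX / 2 - 1) places it as
 * far ahead as an unambiguous comparison allows, e.g.:
 *
 *	rcv_nxt = 10    -> sync_point = 10 + 32766 = 32776
 *	rcv_nxt = 60000 -> sync_point = (60000 + 32766) % 65536 = 27230
 *
 * so a lower syncpt learned later from an out-of-order FAILOVER_MSG can
 * still replace it (see tipc_node_check_state()).
 */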
static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct tipc_media_addr *maddr = NULL;
	struct tipc_link *l = le->link;
	int old_bearer_id = bearer_id;
	struct sk_buff_head xmitq;

	if (!l)
		return;

	__skb_queue_head_init(&xmitq);

	tipc_node_write_lock(n);
	if (!tipc_link_is_establishing(l)) {
		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
	} else {
		/* Defuse pending tipc_node_link_up() */
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
	}
	if (delete) {
		kfree(l);
		le->link = NULL;
		n->link_cnt--;
	}
	trace_tipc_node_link_down(n, true, "node link down or deleted!");
	tipc_node_write_unlock(n);
	if (delete)
		tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
	tipc_sk_rcv(n->net, &le->inputq);
}

static bool node_is_up(struct tipc_node *n)
{
	return n->active_links[0] != INVALID_BEARER_ID;
}

bool tipc_node_is_up(struct net *net, u32 addr)
{
	struct tipc_node *n;
	bool retval = false;

	if (in_own_node(net, addr))
		return true;

	n = tipc_node_find(net, addr);
	if (!n)
		return false;
	retval = node_is_up(n);
	tipc_node_put(n);
	return retval;
}

static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	addr ^= tipc_net(net)->random;
	while ((n = tipc_node_find(net, addr))) {
		tipc_node_put(n);
		addr++;
	}
	return addr;
}

/* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
 * Returns suggested address if any, otherwise 0
 */
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool preliminary;
	u32 sugg_addr;

	/* Suggest new address if some other peer is using this one */
	n = tipc_node_find(net, addr);
	if (n) {
		if (!memcmp(n->peer_id, id, NODE_ID_LEN))
			addr = 0;
		tipc_node_put(n);
		if (!addr)
			return 0;
		return tipc_node_suggest_addr(net, addr);
	}

	/* Suggest previously used address if peer is known */
	n = tipc_node_find_by_id(net, id);
	if (n) {
		sugg_addr = n->addr;
		preliminary = n->preliminary;
		tipc_node_put(n);
		if (!preliminary)
			return sugg_addr;
	}

	/* Even this node may be in conflict */
	if (tn->trial_addr == addr)
		return tipc_node_suggest_addr(net, addr);

	return 0;
}

void tipc_node_check_dest(struct net *net, u32 addr,
			  u8 *peer_id, struct tipc_bearer *b,
			  u16 capabilities, u32 signature, u32 hash_mixes,
			  struct tipc_media_addr *maddr,
			  bool *respond, bool *dupl_addr)
{
	struct tipc_node *n;
	struct tipc_link *l;
	struct tipc_link_entry *le;
	bool addr_match = false;
	bool sign_match = false;
	bool link_up = false;
	bool link_is_reset = false;
	bool accept_addr = false;
	bool reset = false;
	char *if_name;
	unsigned long intv;
	u16 session;

	*dupl_addr = false;
	*respond = false;

	n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
			     false);
	if (!n)
		return;

	tipc_node_write_lock(n);

	le = &n->links[b->identity];

	/* Prepare to validate requesting node's signature and media address */
	l = le->link;
	link_up = l && tipc_link_is_up(l);
	link_is_reset = l && tipc_link_is_reset(l);
	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
	sign_match = (signature == n->signature);

	/* These three flags give us eight permutations, summarized in the
	 * table below and handled one branch at a time:
	 */
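	/* Overview table (condensed from the branch comments that follow;
	 * sign/addr/up refer to sign_match/addr_match/link_up):
	 *
	 *	sign addr up   action
	 *	  1    1   1   ignore (all is fine)
	 *	  1    1   0   respond
	 *	  1    0   1   flag duplicate address, ignore until link down
	 *	  1    0   0   accept new address, respond, reset
	 *	  0    1   1   accept new signature (peer rebooted)
	 *	  0    1   0   accept new signature, respond
	 *	  0    0   1   flag duplicate address, ignore until link down
	 *	  0    0   0   accept signature and address, respond, reset
	 */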
	if (sign_match && addr_match && link_up) {
		/* All is fine. Ignore requests. */
		/* Peer node is not a container/local namespace */
		if (!n->peer_hash_mix)
			n->peer_hash_mix = hash_mixes;
	} else if (sign_match && addr_match && !link_up) {
		/* Respond. The link will come up in due time */
		*respond = true;
	} else if (sign_match && !addr_match && link_up) {
		/* Peer has changed i/f address without rebooting.
		 * If so, the link will reset soon, and the next
		 * discovery will be accepted. So we can ignore it.
		 * It may also be a cloned or malicious peer having
		 * chosen the same node address and signature as an
		 * existing one.
		 * Ignore requests until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (sign_match && !addr_match && !link_up) {
		/* Peer link has changed i/f address without rebooting.
		 * It may also be a cloned or malicious peer; we can't
		 * distinguish between the two.
		 * The signature is correct, so we must accept.
		 */
		accept_addr = true;
		*respond = true;
		reset = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Peer node rebooted. Two possibilities:
		 *  - Delayed re-discovery; this link endpoint has already
		 *    reset and re-established contact with the peer, before
		 *    receiving a discovery message from that node.
		 *    (The peer happened to receive one from this node first).
		 *  - The peer came back so fast that our side has not
		 *    discovered it yet. Probing from this side will soon
		 *    reset the link, since there can be no working link
		 *    endpoint at the peer end, and the link will re-establish.
		 *  Accept the signature, since it comes from a known peer.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/* The peer node has rebooted.
		 * Accept signature, since it is a known peer.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Peer rebooted with new address, or a new/duplicate peer.
		 * Ignore until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with new address, or it is a new peer.
		 * Accept signature and address.
		 */
		n->signature = signature;
		accept_addr = true;
		*respond = true;
		reset = true;
	}

	if (!accept_addr)
		goto exit;

	/* Now create new link if not already existing */
	if (!l) {
		if (n->link_cnt == 2)
			goto exit;

		if_name = strchr(b->name, ':') + 1;
		get_random_bytes(&session, sizeof(u16));
		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
				      b->net_plane, b->mtu, b->priority,
				      b->min_win, b->max_win, session,
				      tipc_own_addr(net), addr, peer_id,
				      n->capabilities,
				      tipc_bc_sndlink(n->net), n->bc_entry.link,
				      &le->inputq,
				      &n->bc_entry.namedq, &l)) {
			*respond = false;
			goto exit;
		}
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		if (n->state == NODE_FAILINGOVER)
			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
		link_is_reset = tipc_link_is_reset(l);
		le->link = l;
		n->link_cnt++;
		tipc_node_calculate_timer(n, l);
		if (n->link_cnt == 1) {
			intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
			if (!mod_timer(&n->timer, intv))
				tipc_node_get(n);
		}
	}
	memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
	tipc_node_write_unlock(n);
	if (reset && !link_is_reset)
		tipc_node_link_down(n, b->identity, false);
	tipc_node_put(n);
}

void tipc_node_delete_links(struct net *net, int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_link_down(n, bearer_id, true);
	}
	rcu_read_unlock();
}

static void tipc_node_reset_links(struct tipc_node *n)
{
	int i;

	pr_warn("Resetting all links to %x\n", n->addr);

	trace_tipc_node_reset_links(n, true, " ");
	for (i = 0; i < MAX_BEARERS; i++) {
		tipc_node_link_down(n, i, false);
	}
}

/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
	n->state = state;
	return;

illegal_evt:
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
}
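/* Example trace through the FSM above for a clean two-way link establish
 * followed by loss of contact (transitions taken from the switch above):
 *
 *	SELF_DOWN_PEER_DOWN    --SELF_ESTABL_CONTACT_EVT-->  SELF_UP_PEER_COMING
 *	SELF_UP_PEER_COMING    --PEER_ESTABL_CONTACT_EVT-->  SELF_UP_PEER_UP
 *	SELF_UP_PEER_UP        --SELF_LOST_CONTACT_EVT---->  SELF_DOWN_PEER_LEAVING
 *	SELF_DOWN_PEER_LEAVING --PEER_LOST_CONTACT_EVT---->  SELF_DOWN_PEER_DOWN
 */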
static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %x\n", n->addr);
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	trace_tipc_node_lost_contact(n, true, " ");

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);
	skb_queue_purge(&n->bc_entry.namedq);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;
	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}

/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of @linkname output buffer
 *
 * Return: 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_read_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		strncpy(linkname, tipc_link_name(link), len);
		err = 0;
	}
	tipc_node_read_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	struct sk_buff_head inputq;

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (msg_connected(hdr) || msg_named(hdr) ||
		    msg_direct(hdr)) {
			tipc_loopback_trace(peer_net, list);
			spin_lock_init(&list->lock);
			tipc_sk_rcv(peer_net, list);
			return;
		}
		if (msg_mcast(hdr)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
			return;
		}
		return;
	case MSG_FRAGMENTER:
		if (tipc_msg_assemble(list)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
		}
		return;
	case GROUP_PROTOCOL:
	case CONN_MANAGER:
		tipc_loopback_trace(peer_net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(peer_net, list);
		return;
	case LINK_PROTOCOL:
	case NAME_DISTRIBUTOR:
	case TUNNEL_PROTOCOL:
	case BCAST_PROTOCOL:
		return;
	default:
		return;
	}
}

/**
 * tipc_node_xmit() - general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain.
 * Return: 0 on success, otherwise: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE, -ENOBUFS
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link_entry *le = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	bool node_up = false;
	struct net *peer_net;
	int bearer_id;
	int rc;

	if (in_own_node(net, dnode)) {
		tipc_loopback_trace(net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(net, list);
		return 0;
	}

	n = tipc_node_find(net, dnode);
	if (unlikely(!n)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	rcu_read_lock();
	tipc_node_read_lock(n);
	node_up = node_is_up(n);
	peer_net = n->peer_net;
	tipc_node_read_unlock(n);
	if (node_up && peer_net && check_net(peer_net)) {
		/* xmit inner linux container */
		tipc_lxc_xmit(peer_net, list);
		if (likely(skb_queue_empty(list))) {
			rcu_read_unlock();
			tipc_node_put(n);
			return 0;
		}
	}
	rcu_read_unlock();

	tipc_node_read_lock(n);
	bearer_id = n->active_links[selector & 1];
	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
		tipc_node_read_unlock(n);
		tipc_node_put(n);
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	__skb_queue_head_init(&xmitq);
	le = &n->links[bearer_id];
	spin_lock_bh(&le->lock);
	rc = tipc_link_xmit(le->link, list, &xmitq);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(n);

	if (unlikely(rc == -ENOBUFS))
		tipc_node_link_down(n, bearer_id, false);
	else
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

	tipc_node_put(n);

	return rc;
}
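/* Typical caller-side pattern for tipc_node_xmit() (illustrative sketch;
 * tipc_node_xmit_skb() below is the real single-buffer convenience wrapper):
 *
 *	struct sk_buff_head pkts;
 *
 *	__skb_queue_head_init(&pkts);
 *	... build message buffers onto &pkts ...
 *	rc = tipc_node_xmit(net, &pkts, dnode, selector);
 *
 * The chain is always consumed: delivered locally, queued on a link, or
 * purged on error, so the caller never frees the buffers itself.
 */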
/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	__skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	tipc_node_xmit(net, &head, dnode, selector);
	return 0;
}

/* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
 */
int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	u32 selector, dnode;

	while ((skb = __skb_dequeue(xmitq))) {
		selector = msg_origport(buf_msg(skb));
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, selector);
	}
	return 0;
}

void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests)
{
	struct sk_buff_head xmitq;
	struct sk_buff *txskb;
	struct tipc_node *n;
	u16 dummy;
	u32 dst;

	/* Use broadcast if all nodes support it */
	if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) {
		__skb_queue_head_init(&xmitq);
		__skb_queue_tail(&xmitq, skb);
		tipc_bcast_xmit(net, &xmitq, &dummy);
		return;
	}

	/* Otherwise use legacy replicast method */
	rcu_read_lock();
	list_for_each_entry_rcu(n, tipc_nodes(net), list) {
		dst = n->addr;
		if (in_own_node(net, dst))
			continue;
		if (!node_is_up(n))
			continue;
		txskb = pskb_copy(skb, GFP_ATOMIC);
		if (!txskb)
			break;
		msg_set_destnode(buf_msg(txskb), dst);
		tipc_node_xmit_skb(net, txskb, dst, 0);
	}
	rcu_read_unlock();
	kfree_skb(skb);
}

static void tipc_node_mcast_rcv(struct tipc_node *n)
{
	struct tipc_bclink_entry *be = &n->bc_entry;

	/* 'arrvq' is under inputq2's lock protection */
	spin_lock_bh(&be->inputq2.lock);
	spin_lock_bh(&be->inputq1.lock);
	skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
	spin_unlock_bh(&be->inputq1.lock);
	spin_unlock_bh(&be->inputq2.lock);
	tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
}

static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_link *ucl;
	int rc;

	rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);

	if (rc & TIPC_LINK_DOWN_EVT) {
		tipc_node_reset_links(n);
		return;
	}

	if (!(rc & TIPC_LINK_SND_STATE))
		return;

	/* If probe message, a STATE response will be sent anyway */
	if (msg_probe(hdr))
		return;

	/* Produce a STATE message carrying broadcast NACK */
	tipc_node_read_lock(n);
	ucl = n->links[bearer_id].link;
	if (ucl)
		tipc_link_build_state_msg(ucl, xmitq);
	tipc_node_read_unlock(n);
}

/**
 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer_id: id of bearer message arrived on
 *
 * Invoked with no locks held.
 */
static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
{
	int rc;
	struct sk_buff_head xmitq;
	struct tipc_bclink_entry *be;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	u32 dnode = msg_destnode(hdr);
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	/* If NACK for other node, let rcv link for that node peek into it */
	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
		n = tipc_node_find(net, dnode);
	else
		n = tipc_node_find(net, msg_prevnode(hdr));
	if (!n) {
		kfree_skb(skb);
		return;
	}
	be = &n->bc_entry;
	le = &n->links[bearer_id];

	rc = tipc_bcast_rcv(net, be->link, skb);

	/* Broadcast ACKs are sent on a unicast link */
	if (rc & TIPC_LINK_SND_STATE) {
		tipc_node_read_lock(n);
		tipc_link_build_state_msg(le->link, &xmitq);
		tipc_node_read_unlock(n);
	}

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

	if (!skb_queue_empty(&be->inputq1))
		tipc_node_mcast_rcv(n);

	/* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
	if (!skb_queue_empty(&n->bc_entry.namedq))
		tipc_named_rcv(net, &n->bc_entry.namedq,
			       &n->bc_entry.named_rcv_nxt,
			       &n->bc_entry.named_open);

	/* If reassembly or retransmission failure => reset all links to peer */
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_node_reset_links(n);

	tipc_node_put(n);
}

/**
 * tipc_node_check_state - check and if necessary update node state
 * @n: target tipc_node
 * @skb: TIPC packet
 * @bearer_id: identity of bearer delivering the packet
 * @xmitq: queue for messages to be xmited on
 * Return: true if state and msg are ok, otherwise false
 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int mtyp = msg_type(hdr);
	u16 oseqno = msg_seqno(hdr);
	u16 exp_pkts = msg_msgcnt(hdr);
	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
	int state = n->state;
	struct tipc_link *l, *tnl, *pl = NULL;
	struct tipc_media_addr *maddr;
	int pb_id;

	if (trace_tipc_node_check_state_enabled()) {
		trace_tipc_skb_dump(skb, false, "skb for node state check");
		trace_tipc_node_check_state(n, true, " ");
	}
	l = n->links[bearer_id].link;
	if (!l)
		return false;
	rcv_nxt = tipc_link_rcv_nxt(l);

	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
		return true;

	/* Find parallel link, if any */
	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
		if ((pb_id != bearer_id) && n->links[pb_id].link) {
			pl = n->links[pb_id].link;
			break;
		}
	}

	if (!tipc_link_validate_msg(l, hdr)) {
		trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
		return false;
	}

	/* Check and update node accessibility if applicable */
	if (state == SELF_UP_PEER_COMING) {
		if (!tipc_link_is_up(l))
			return true;
		if (!msg_peer_link_is_up(hdr))
			return true;
		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
	}

/**
 * tipc_node_check_state - check and if necessary update node state
 * @n: target tipc_node
 * @skb: TIPC packet
 * @bearer_id: identity of bearer delivering the packet
 * @xmitq: queue for messages to be transmitted on
 * Return: true if state and msg are ok, otherwise false
 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int mtyp = msg_type(hdr);
	u16 oseqno = msg_seqno(hdr);
	u16 exp_pkts = msg_msgcnt(hdr);
	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
	int state = n->state;
	struct tipc_link *l, *tnl, *pl = NULL;
	struct tipc_media_addr *maddr;
	int pb_id;

	if (trace_tipc_node_check_state_enabled()) {
		trace_tipc_skb_dump(skb, false, "skb for node state check");
		trace_tipc_node_check_state(n, true, " ");
	}
	l = n->links[bearer_id].link;
	if (!l)
		return false;
	rcv_nxt = tipc_link_rcv_nxt(l);

	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
		return true;

	/* Find parallel link, if any */
	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
		if ((pb_id != bearer_id) && n->links[pb_id].link) {
			pl = n->links[pb_id].link;
			break;
		}
	}

	if (!tipc_link_validate_msg(l, hdr)) {
		trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
		return false;
	}

	/* Check and update node accessibility if applicable */
	if (state == SELF_UP_PEER_COMING) {
		if (!tipc_link_is_up(l))
			return true;
		if (!msg_peer_link_is_up(hdr))
			return true;
		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
	}

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_node_is_up(hdr))
			return false;
		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		return true;
	}

	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	/* Ignore duplicate packets */
	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
		return true;

	/* Initiate or update failover mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
		syncpt = oseqno + exp_pkts - 1;
		if (pl && !tipc_link_is_reset(pl)) {
			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
			trace_tipc_node_link_down(n, true,
						  "node link down <- failover!");
			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
							tipc_link_inputq(l));
		}

		/* If parallel link was already down, and this happened before
		 * the tunnel link came up, node failover was never started.
		 * Ensure that a FAILOVER_MSG is sent to get peer out of
		 * NODE_FAILINGOVER state, also this node must accept
		 * TUNNEL_MSGs from peer.
		 */
		if (n->state != NODE_FAILINGOVER)
			tipc_node_link_failover(n, pl, l, xmitq);

		/* If pkts arrive out of order, use lowest calculated syncpt */
		if (less(syncpt, n->sync_point))
			n->sync_point = syncpt;
	}

	/* Open parallel link when tunnel link reaches synch point */
	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
		if (!more(rcv_nxt, n->sync_point))
			return true;
		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
		if (pl)
			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
		return true;
	}

	/* No syncing needed if only one link */
	if (!pl || !tipc_link_is_up(pl))
		return true;

	/* Initiate synch mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
		if (n->capabilities & TIPC_TUNNEL_ENHANCED)
			syncpt = msg_syncpt(hdr);
		else
			syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
		if (!tipc_link_is_up(l))
			__tipc_node_link_up(n, bearer_id, xmitq);
		if (n->state == SELF_UP_PEER_UP) {
			n->sync_point = syncpt;
			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
		}
	}

	/* Open tunnel link when parallel link reaches synch point */
	if (n->state == NODE_SYNCHING) {
		if (tipc_link_is_synching(l)) {
			tnl = l;
		} else {
			tnl = pl;
			pl = l;
		}
		inputq_len = skb_queue_len(tipc_link_inputq(pl));
		dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
		if (more(dlv_nxt, n->sync_point)) {
			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
			return true;
		}
		if (l == pl)
			return true;
		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
			return true;
		if (usr == LINK_PROTOCOL)
			return true;
		return false;
	}
	return true;
}
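
/* Note on the less()/more() checks above: link sequence numbers are 16-bit
 * and wrap, so ordering is evaluated modulo 2^16. A common formulation of
 * such a serial-number comparison is sketched below; it is shown only to
 * illustrate the wraparound-safe semantics the state checks rely on, not
 * the exact helper TIPC uses ('example_seq_less' is a hypothetical name):
 *
 *	static inline bool example_seq_less(u16 a, u16 b)
 *	{
 *		return (s16)(a - b) < 0;
 *	}
 */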

/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
	struct sk_buff_head xmitq;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr;
	struct tipc_node *n;
	int bearer_id = b->identity;
	u32 self = tipc_own_addr(net);
	int usr, rc = 0;
	u16 bc_ack;
#ifdef CONFIG_TIPC_CRYPTO
	struct tipc_ehdr *ehdr;

	/* Check if message must be decrypted first */
	if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
		goto rcv;

	ehdr = (struct tipc_ehdr *)skb->data;
	if (likely(ehdr->user != LINK_CONFIG)) {
		n = tipc_node_find(net, ntohl(ehdr->addr));
		if (unlikely(!n))
			goto discard;
	} else {
		n = tipc_node_find_by_id(net, ehdr->id);
	}
	tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
	if (!skb)
		return;

rcv:
#endif
	/* Ensure message is well-formed before touching the header */
	if (unlikely(!tipc_msg_validate(&skb)))
		goto discard;
	__skb_queue_head_init(&xmitq);
	hdr = buf_msg(skb);
	usr = msg_user(hdr);
	bc_ack = msg_bcast_ack(hdr);

	/* Handle arrival of discovery or broadcast packet */
	if (unlikely(msg_non_seq(hdr))) {
		if (unlikely(usr == LINK_CONFIG))
			return tipc_disc_rcv(net, skb, b);
		else
			return tipc_node_bc_rcv(net, skb, bearer_id);
	}

	/* Discard unicast link messages destined for another node */
	if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
		goto discard;

	/* Locate neighboring node that sent packet */
	n = tipc_node_find(net, msg_prevnode(hdr));
	if (unlikely(!n))
		goto discard;
	le = &n->links[bearer_id];

	/* Ensure broadcast reception is in synch with peer's send state */
	if (unlikely(usr == LINK_PROTOCOL)) {
		if (unlikely(skb_linearize(skb))) {
			tipc_node_put(n);
			goto discard;
		}
		hdr = buf_msg(skb);
		tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
	} else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) {
		tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
	}

	/* Receive packet directly if conditions permit */
	tipc_node_read_lock(n);
	if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
		spin_lock_bh(&le->lock);
		if (le->link) {
			rc = tipc_link_rcv(le->link, skb, &xmitq);
			skb = NULL;
		}
		spin_unlock_bh(&le->lock);
	}
	tipc_node_read_unlock(n);

	/* Check/update node state before receiving */
	if (unlikely(skb)) {
		if (unlikely(skb_linearize(skb)))
			goto out_node_put;
		tipc_node_write_lock(n);
		if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
			if (le->link) {
				rc = tipc_link_rcv(le->link, skb, &xmitq);
				skb = NULL;
			}
		}
		tipc_node_write_unlock(n);
	}

	if (unlikely(rc & TIPC_LINK_UP_EVT))
		tipc_node_link_up(n, bearer_id, &xmitq);

	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
		tipc_node_link_down(n, bearer_id, false);

	if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
		tipc_named_rcv(net, &n->bc_entry.namedq,
			       &n->bc_entry.named_rcv_nxt,
			       &n->bc_entry.named_open);

	if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
		tipc_node_mcast_rcv(n);

	if (!skb_queue_empty(&le->inputq))
		tipc_sk_rcv(net, &le->inputq);

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

out_node_put:
	tipc_node_put(n);
discard:
	kfree_skb(skb);
}
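
/* Call-site sketch: a bearer receive handler typically passes every frame to
 * tipc_rcv() and gives up ownership of the buffer, since tipc_rcv() frees or
 * enqueues the skb on every path; the buffer must not be touched after the
 * call. 'example_bearer_deliver' is a hypothetical illustration, not a
 * driver in this tree:
 *
 *	static void example_bearer_deliver(struct net *net,
 *					   struct sk_buff *skb,
 *					   struct tipc_bearer *b)
 *	{
 *		tipc_rcv(net, skb, b);
 *	}
 */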

void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
			      int prop)
{
	struct tipc_net *tn = tipc_net(net);
	int bearer_id = b->identity;
	struct sk_buff_head xmitq;
	struct tipc_link_entry *e;
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	rcu_read_lock();

	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_write_lock(n);
		e = &n->links[bearer_id];
		if (e->link) {
			if (prop == TIPC_NLA_PROP_TOL)
				tipc_link_set_tolerance(e->link, b->tolerance,
							&xmitq);
			else if (prop == TIPC_NLA_PROP_MTU)
				tipc_link_set_mtu(e->link, b->mtu);

			/* Update MTU for node link entry */
			e->mtu = tipc_link_mss(e->link);
		}

		tipc_node_write_unlock(n);
		tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
	}

	rcu_read_unlock();
}

int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
	struct tipc_node *peer, *temp_node;
	u8 node_id[NODE_ID_LEN];
	u64 *w0 = (u64 *)&node_id[0];
	u64 *w1 = (u64 *)&node_id[8];
	u32 addr;
	int err;

	/* We identify the peer by its net */
	if (!info->attrs[TIPC_NLA_NET])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
					  info->attrs[TIPC_NLA_NET],
					  tipc_nl_net_policy, info->extack);
	if (err)
		return err;

	/* attrs[TIPC_NLA_NET_NODEID] and attrs[TIPC_NLA_NET_ADDR] are
	 * mutually exclusive cases
	 */
	if (attrs[TIPC_NLA_NET_ADDR]) {
		addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
		if (!addr)
			return -EINVAL;
	}

	if (attrs[TIPC_NLA_NET_NODEID]) {
		if (!attrs[TIPC_NLA_NET_NODEID_W1])
			return -EINVAL;
		*w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
		*w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
		addr = hash128to32(node_id);
	}

	if (in_own_node(net, addr))
		return -ENOTSUPP;

	spin_lock_bh(&tn->node_list_lock);
	peer = tipc_node_find(net, addr);
	if (!peer) {
		spin_unlock_bh(&tn->node_list_lock);
		return -ENXIO;
	}

	tipc_node_write_lock(peer);
	if (peer->state != SELF_DOWN_PEER_DOWN &&
	    peer->state != SELF_DOWN_PEER_LEAVING) {
		tipc_node_write_unlock(peer);
		err = -EBUSY;
		goto err_out;
	}

	tipc_node_clear_links(peer);
	tipc_node_write_unlock(peer);
	tipc_node_delete(peer);

	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
	err = 0;
err_out:
	tipc_node_put(peer);
	spin_unlock_bh(&tn->node_list_lock);

	return err;
}
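
/* The capability recalculation at the end of tipc_nl_peer_rm() follows the
 * same pattern used elsewhere when nodes come and go: start from this node's
 * full capability set and AND in every remaining peer. As a stand-alone
 * sketch (hypothetical helper name; assumes the caller holds
 * node_list_lock, as above):
 *
 *	static void example_recalc_capabilities(struct tipc_net *tn)
 *	{
 *		struct tipc_node *tmp;
 *
 *		tn->capabilities = TIPC_NODE_CAPABILITIES;
 *		list_for_each_entry_rcu(tmp, &tn->node_list, list)
 *			tn->capabilities &= tmp->capabilities;
 *	}
 */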

int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent(),
			 * which means that setting prev_seq here will cause
			 * the consistency check to fail in the netlink
			 * callback handler, resulting in the NLMSG_DONE
			 * message having the NLM_F_DUMP_INTR flag set if the
			 * node state changed while we released the lock.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		if (node->preliminary)
			continue;
		if (last_addr) {
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_read_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			last_addr = node->addr;
			tipc_node_read_unlock(node);
			goto out;
		}

		tipc_node_read_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}

/* tipc_node_find_by_name - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_node_find_by_name(struct net *net,
						const char *link_name,
						unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l;
	struct tipc_node *n;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_read_lock(n);
		for (i = 0; i < MAX_BEARERS; i++) {
			l = n->links[i].link;
			if (l && !strcmp(tipc_link_name(l), link_name)) {
				*bearer_id = i;
				found_node = n;
				break;
			}
		}
		tipc_node_read_unlock(n);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}
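
/* tipc_nl_node_dump() above follows the standard resumable netlink dump
 * pattern: progress is parked in cb->args[] between invocations, and a
 * non-zero return asks the netlink core to call back for more. Reduced to
 * its skeleton (names are illustrative only):
 *
 *	static int example_dump(struct sk_buff *skb,
 *				struct netlink_callback *cb)
 *	{
 *		int done = cb->args[0];
 *
 *		if (done)
 *			return 0;
 *		...emit as many records as fit into 'skb', then set
 *		   'done' or park a resume cursor in cb->args[]...
 *		cb->args[0] = done;
 *		return skb->len;
 *	}
 */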

int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct sk_buff_head xmitq;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	__skb_queue_head_init(&xmitq);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_node_find_by_name(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_read_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			tipc_link_set_tolerance(link, tol, &xmitq);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			tipc_link_set_prio(link, prio, &xmitq);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 max_win;

			max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link,
						   tipc_link_min_win(link),
						   max_win);
		}
	}

out:
	tipc_node_read_unlock(node);
	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
			 NULL);
	return res;
}

int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg, tipc_net(net)->bcl);
		if (err)
			goto err_free;
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_node_find_by_name(net, name, &bearer_id);
		if (!node) {
			err = -EINVAL;
			goto err_free;
		}

		tipc_node_read_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_read_unlock(node);
			err = -EINVAL;
			goto err_free;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_read_unlock(node);
		if (err)
			goto err_free;
	}

	return genlmsg_reply(msg.skb, info);

err_free:
	nlmsg_free(msg.skb);
	return err;
}
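
/* The handlers above share TIPC's collect-then-transmit idiom: property
 * changes are applied under the node read lock, any protocol messages they
 * generate are gathered in a local 'xmitq', and the queue is flushed on the
 * bearer only after the lock is released. Condensed sketch (hypothetical
 * helper name):
 *
 *	static void example_set_tol(struct net *net, struct tipc_node *n,
 *				    int bearer_id, u32 tol)
 *	{
 *		struct sk_buff_head xmitq;
 *
 *		__skb_queue_head_init(&xmitq);
 *		tipc_node_read_lock(n);
 *		if (n->links[bearer_id].link)
 *			tipc_link_set_tolerance(n->links[bearer_id].link,
 *						tol, &xmitq);
 *		tipc_node_read_unlock(n);
 *		tipc_bearer_xmit(net, bearer_id, &xmitq,
 *				 &n->links[bearer_id].maddr, NULL);
 *	}
 */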

int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	struct tipc_link_entry *le;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	err = -EINVAL;
	if (!strcmp(link_name, tipc_bclink_name)) {
		err = tipc_bclink_reset_stats(net, tipc_bc_sndlink(net));
		if (err)
			return err;
		return 0;
	} else if (strstr(link_name, tipc_bclink_name)) {
		rcu_read_lock();
		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			link = node->bc_entry.link;
			if (link && !strcmp(link_name, tipc_link_name(link))) {
				err = tipc_bclink_reset_stats(net, link);
				tipc_node_read_unlock(node);
				break;
			}
			tipc_node_read_unlock(node);
		}
		rcu_read_unlock();
		return err;
	}

	node = tipc_node_find_by_name(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	le = &node->links[bearer_id];
	tipc_node_read_lock(node);
	spin_lock_bh(&le->lock);
	link = node->links[bearer_id].link;
	if (!link) {
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(node);
		return -EINVAL;
	}
	tipc_link_reset_stats(link);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(node);
	return 0;
}

/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link,
				    bool bc_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}

	if (bc_link) {
		*prev_link = i;
		err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link);
		if (err)
			return err;
	}

	*prev_link = 0;

	return 0;
}
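
/* Resume semantics of __tipc_nl_add_node_links() above: *prev_link is left
 * pointing at the link whose emission failed and is reset to 0 only after
 * the whole node (including its broadcast link, when requested) has been
 * dumped. Callers therefore thread the same cursor variable through
 * successive dump invocations, e.g. (sketch; 'cursor' is illustrative):
 *
 *	u32 cursor = cb->args[1];
 *
 *	err = __tipc_nl_add_node_links(net, &msg, node, &cursor, false);
 *	cb->args[1] = cursor;
 */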
2706 */ 2707 cb->prev_seq = 1; 2708 goto out; 2709 } 2710 tipc_node_put(node); 2711 2712 list_for_each_entry_continue_rcu(node, &tn->node_list, 2713 list) { 2714 tipc_node_read_lock(node); 2715 err = __tipc_nl_add_node_links(net, &msg, node, 2716 &prev_link, bc_link); 2717 tipc_node_read_unlock(node); 2718 if (err) 2719 goto out; 2720 2721 prev_node = node->addr; 2722 } 2723 } else { 2724 err = tipc_nl_add_bc_link(net, &msg, tn->bcl); 2725 if (err) 2726 goto out; 2727 2728 list_for_each_entry_rcu(node, &tn->node_list, list) { 2729 tipc_node_read_lock(node); 2730 err = __tipc_nl_add_node_links(net, &msg, node, 2731 &prev_link, bc_link); 2732 tipc_node_read_unlock(node); 2733 if (err) 2734 goto out; 2735 2736 prev_node = node->addr; 2737 } 2738 } 2739 done = 1; 2740 out: 2741 rcu_read_unlock(); 2742 2743 cb->args[0] = prev_node; 2744 cb->args[1] = prev_link; 2745 cb->args[2] = done; 2746 cb->args[3] = bc_link; 2747 2748 return skb->len; 2749 } 2750 2751 int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info) 2752 { 2753 struct nlattr *attrs[TIPC_NLA_MON_MAX + 1]; 2754 struct net *net = sock_net(skb->sk); 2755 int err; 2756 2757 if (!info->attrs[TIPC_NLA_MON]) 2758 return -EINVAL; 2759 2760 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX, 2761 info->attrs[TIPC_NLA_MON], 2762 tipc_nl_monitor_policy, 2763 info->extack); 2764 if (err) 2765 return err; 2766 2767 if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) { 2768 u32 val; 2769 2770 val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]); 2771 err = tipc_nl_monitor_set_threshold(net, val); 2772 if (err) 2773 return err; 2774 } 2775 2776 return 0; 2777 } 2778 2779 static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg) 2780 { 2781 struct nlattr *attrs; 2782 void *hdr; 2783 u32 val; 2784 2785 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 2786 0, TIPC_NL_MON_GET); 2787 if (!hdr) 2788 return -EMSGSIZE; 2789 2790 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON); 2791 if (!attrs) 2792 goto msg_full; 2793 2794 val = tipc_nl_monitor_get_threshold(net); 2795 2796 if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val)) 2797 goto attr_msg_full; 2798 2799 nla_nest_end(msg->skb, attrs); 2800 genlmsg_end(msg->skb, hdr); 2801 2802 return 0; 2803 2804 attr_msg_full: 2805 nla_nest_cancel(msg->skb, attrs); 2806 msg_full: 2807 genlmsg_cancel(msg->skb, hdr); 2808 2809 return -EMSGSIZE; 2810 } 2811 2812 int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info) 2813 { 2814 struct net *net = sock_net(skb->sk); 2815 struct tipc_nl_msg msg; 2816 int err; 2817 2818 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 2819 if (!msg.skb) 2820 return -ENOMEM; 2821 msg.portid = info->snd_portid; 2822 msg.seq = info->snd_seq; 2823 2824 err = __tipc_nl_add_monitor_prop(net, &msg); 2825 if (err) { 2826 nlmsg_free(msg.skb); 2827 return err; 2828 } 2829 2830 return genlmsg_reply(msg.skb, info); 2831 } 2832 2833 int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb) 2834 { 2835 struct net *net = sock_net(skb->sk); 2836 u32 prev_bearer = cb->args[0]; 2837 struct tipc_nl_msg msg; 2838 int bearer_id; 2839 int err; 2840 2841 if (prev_bearer == MAX_BEARERS) 2842 return 0; 2843 2844 msg.skb = skb; 2845 msg.portid = NETLINK_CB(cb->skb).portid; 2846 msg.seq = cb->nlh->nlmsg_seq; 2847 2848 rtnl_lock(); 2849 for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) { 2850 err = __tipc_nl_add_monitor(net, &msg, bearer_id); 2851 if (err) 2852 break; 

int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_bearer = cb->args[0];
	struct tipc_nl_msg msg;
	int bearer_id;
	int err;

	if (prev_bearer == MAX_BEARERS)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
		err = __tipc_nl_add_monitor(net, &msg, bearer_id);
		if (err)
			break;
	}
	rtnl_unlock();
	cb->args[0] = bearer_id;

	return skb->len;
}

int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_node = cb->args[1];
	u32 bearer_id = cb->args[2];
	int done = cb->args[0];
	struct tipc_nl_msg msg;
	int err;

	if (!prev_node) {
		struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
		struct nlattr *mon[TIPC_NLA_MON_MAX + 1];

		if (!attrs[TIPC_NLA_MON])
			return -EINVAL;

		err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
						  attrs[TIPC_NLA_MON],
						  tipc_nl_monitor_policy,
						  NULL);
		if (err)
			return err;

		if (!mon[TIPC_NLA_MON_REF])
			return -EINVAL;

		bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);

		if (bearer_id >= MAX_BEARERS)
			return -EINVAL;
	}

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
	if (!err)
		done = 1;

	rtnl_unlock();
	cb->args[0] = done;
	cb->args[1] = prev_node;
	cb->args[2] = bearer_id;

	return skb->len;
}

#ifdef CONFIG_TIPC_CRYPTO
static int tipc_nl_retrieve_key(struct nlattr **attrs,
				struct tipc_aead_key **pkey)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
	struct tipc_aead_key *key;

	if (!attr)
		return -ENODATA;

	if (nla_len(attr) < sizeof(*key))
		return -EINVAL;
	key = (struct tipc_aead_key *)nla_data(attr);
	if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
	    nla_len(attr) < tipc_aead_key_size(key))
		return -EINVAL;

	*pkey = key;
	return 0;
}

static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];

	if (!attr)
		return -ENODATA;

	if (nla_len(attr) < TIPC_NODEID_LEN)
		return -EINVAL;

	*node_id = (u8 *)nla_data(attr);
	return 0;
}

static int tipc_nl_retrieve_rekeying(struct nlattr **attrs, u32 *intv)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_REKEYING];

	if (!attr)
		return -ENODATA;

	*intv = nla_get_u32(attr);
	return 0;
}
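
/* tipc_nl_retrieve_key() above validates the variable-sized key attribute
 * in two steps because the payload length is self-described: first the
 * fixed header must be present, then the attribute must also cover the key
 * bytes the header claims. Schematically:
 *
 *	nla_len(attr) >= sizeof(struct tipc_aead_key)	(fixed header)
 *	key->keylen <= TIPC_AEAD_KEYLEN_MAX		(sane length)
 *	nla_len(attr) >= tipc_aead_key_size(key)	(full payload)
 *
 * Skipping the first check would read key->keylen out of bounds.
 */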

static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx, *c = tx;
	struct tipc_node *n = NULL;
	struct tipc_aead_key *ukey;
	bool rekeying = true, master_key = false;
	u8 *id, *own_id, mode;
	u32 intv = 0;
	int rc = 0;

	if (!info->attrs[TIPC_NLA_NODE])
		return -EINVAL;

	rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
			      info->attrs[TIPC_NLA_NODE],
			      tipc_nl_node_policy, info->extack);
	if (rc)
		return rc;

	own_id = tipc_own_id(net);
	if (!own_id) {
		GENL_SET_ERR_MSG(info, "own node identity not found (set id?)");
		return -EPERM;
	}

	rc = tipc_nl_retrieve_rekeying(attrs, &intv);
	if (rc == -ENODATA)
		rekeying = false;

	rc = tipc_nl_retrieve_key(attrs, &ukey);
	if (rc == -ENODATA && rekeying)
		goto rekeying;
	else if (rc)
		return rc;

	rc = tipc_aead_key_validate(ukey, info);
	if (rc)
		return rc;

	rc = tipc_nl_retrieve_nodeid(attrs, &id);
	switch (rc) {
	case -ENODATA:
		mode = CLUSTER_KEY;
		master_key = !!(attrs[TIPC_NLA_NODE_KEY_MASTER]);
		break;
	case 0:
		mode = PER_NODE_KEY;
		if (memcmp(id, own_id, NODE_ID_LEN)) {
			n = tipc_node_find_by_id(net, id) ?:
				tipc_node_create(net, 0, id, 0xffffu, 0, true);
			if (unlikely(!n))
				return -ENOMEM;
			c = n->crypto_rx;
		}
		break;
	default:
		return rc;
	}

	/* Initiate the TX/RX key */
	rc = tipc_crypto_key_init(c, ukey, mode, master_key);
	if (n)
		tipc_node_put(n);

	if (unlikely(rc < 0)) {
		GENL_SET_ERR_MSG(info, "unable to initiate or attach new key");
		return rc;
	} else if (c == tx) {
		/* Distribute TX key but not master one */
		if (!master_key && tipc_crypto_key_distr(tx, rc, NULL))
			GENL_SET_ERR_MSG(info, "failed to replicate new key");
rekeying:
		/* Schedule TX rekeying if needed */
		tipc_crypto_rekeying_sched(tx, rekeying, intv);
	}

	return 0;
}

int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __tipc_nl_node_set_key(skb, info);
	rtnl_unlock();

	return err;
}

static int __tipc_nl_node_flush_key(struct sk_buff *skb,
				    struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;

	tipc_crypto_key_flush(tn->crypto_tx);
	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list)
		tipc_crypto_key_flush(n->crypto_rx);
	rcu_read_unlock();

	return 0;
}

int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __tipc_nl_node_flush_key(skb, info);
	rtnl_unlock();

	return err;
}
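
/* Locking note for the netlink entry points above: both wrappers take the
 * RTNL lock around the worker so key state cannot change concurrently with
 * bearer or namespace reconfiguration. Any new crypto op would follow the
 * same shape (sketch; 'example_op' is a hypothetical name):
 *
 *	int tipc_nl_node_example_op(struct sk_buff *skb,
 *				    struct genl_info *info)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = __tipc_nl_node_example_op(skb, info);
 *		rtnl_unlock();
 *		return err;
 *	}
 */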
#endif

/**
 * tipc_node_dump - dump TIPC node data
 * @n: tipc node to be dumped
 * @more: dump more?
 *        - false: dump only tipc node data
 *        - true: dump node link data as well
 * @buf: buffer where the formatted dump data is returned
 */
int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
{
	int i = 0;
	size_t sz = (more) ? NODE_LMAX : NODE_LMIN;

	if (!n) {
		i += scnprintf(buf, sz, "node data: (null)\n");
		return i;
	}

	i += scnprintf(buf, sz, "node data: %x", n->addr);
	i += scnprintf(buf + i, sz - i, " %x", n->state);
	i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
	i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
	i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
	i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
	i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
	i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
	i += scnprintf(buf + i, sz - i, " %u", n->working_links);
	i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
	i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);

	if (!more)
		return i;

	i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
	i += scnprintf(buf + i, sz - i, " media: ");
	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
	i += scnprintf(buf + i, sz - i, "\n");
	i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
	i += scnprintf(buf + i, sz - i, " inputq: ");
	i += tipc_list_dump(&n->links[0].inputq, false, buf + i);

	i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
	i += scnprintf(buf + i, sz - i, " media: ");
	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
	i += scnprintf(buf + i, sz - i, "\n");
	i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
	i += scnprintf(buf + i, sz - i, " inputq: ");
	i += tipc_list_dump(&n->links[1].inputq, false, buf + i);

	i += scnprintf(buf + i, sz - i, "bclink:\n ");
	i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);

	return i;
}

void tipc_node_pre_cleanup_net(struct net *exit_net)
{
	struct tipc_node *n;
	struct tipc_net *tn;
	struct net *tmp;

	rcu_read_lock();
	for_each_net_rcu(tmp) {
		if (tmp == exit_net)
			continue;
		tn = tipc_net(tmp);
		if (!tn)
			continue;
		spin_lock_bh(&tn->node_list_lock);
		list_for_each_entry_rcu(n, &tn->node_list, list) {
			if (!n->peer_net)
				continue;
			if (n->peer_net != exit_net)
				continue;
			tipc_node_write_lock(n);
			n->peer_net = NULL;
			n->peer_hash_mix = 0;
			tipc_node_write_unlock_fast(n);
			break;
		}
		spin_unlock_bh(&tn->node_list_lock);
	}
	rcu_read_unlock();
}
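
/* Sizing note for tipc_node_dump() above: the caller must supply a buffer of
 * at least NODE_LMIN bytes, or NODE_LMAX when 'more' is true (both constants
 * are assumed to come from the trace helpers' headers); the scnprintf()
 * chain never writes past 'sz'. A typical call ('buf' is illustrative):
 *
 *	char buf[NODE_LMAX];
 *
 *	tipc_node_dump(n, true, buf);
 *	pr_info("%s", buf);
 */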