/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "monitor.h"
#include "discover.h"
#include "netlink.h"
#include "trace.h"
#include "crypto.h"

#define INVALID_NODE_SIG	0x10000
#define NODE_CLEANUP_AFTER	300000

/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_NOTIFY_LINK_UP: notify link is up
 * TIPC_NOTIFY_LINK_DOWN: notify link is down
 */
enum {
	TIPC_NOTIFY_NODE_DOWN = (1 << 3),
	TIPC_NOTIFY_NODE_UP = (1 << 4),
	TIPC_NOTIFY_LINK_UP = (1 << 6),
	TIPC_NOTIFY_LINK_DOWN = (1 << 7)
};

struct tipc_link_entry {
	struct tipc_link *link;
	spinlock_t lock; /* per link */
	u32 mtu;
	struct sk_buff_head inputq;
	struct tipc_media_addr maddr;
};

struct tipc_bclink_entry {
	struct tipc_link *link;
	struct sk_buff_head inputq1;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq2;
	struct sk_buff_head namedq;
	u16 named_rcv_nxt;
	bool named_open;
};

/**
 * struct tipc_node - TIPC node structure
 * @addr: network address of node
 * @kref: reference counter to node object
 * @lock: rwlock governing access to structure
 * @net: the applicable net namespace
 * @hash: links to adjacent nodes in unsorted hash chain
 * @active_links: bearer ids of active links, used as index into links[] array
 * @links: array containing references to all links to node
 * @bc_entry: broadcast link entry
 * @action_flags: bit mask of different types of node actions
 * @state: connectivity state vs peer node
 * @preliminary: a preliminary node or not
 * @failover_sent: failover sent or not
 * @sync_point: sequence number where synch/failover is finished
 * @list: links to adjacent nodes in sorted list of cluster's nodes
 * @working_links: number of working links to node (both active and standby)
 * @link_cnt: number of links to node
 * @capabilities: bitmap, indicating peer node's functional capabilities
 * @signature: node instance identifier
 * @link_id: local and remote bearer ids of changing link, if any
 * @peer_id: 128-bit ID of peer
 * @peer_id_string: ID string of peer
 * @publ_list: list of publications
 * @conn_sks: list of connections (FIXME)
 * @timer: node's keepalive timer
 * @keepalive_intv: keepalive interval in milliseconds
 * @rcu: rcu struct for tipc_node
 * @delete_at: indicates the time for deleting a down node
 * @peer_net: peer's net namespace
 * @peer_hash_mix: hash for this peer (FIXME)
 * @crypto_rx: RX crypto handler
 */
struct tipc_node {
	u32 addr;
	struct kref kref;
	rwlock_t lock;
	struct net *net;
	struct hlist_node hash;
	int active_links[2];
	struct tipc_link_entry links[MAX_BEARERS];
	struct tipc_bclink_entry bc_entry;
	int action_flags;
	struct list_head list;
	int state;
	bool preliminary;
	bool failover_sent;
	u16 sync_point;
	int link_cnt;
	u16 working_links;
	u16 capabilities;
	u32 signature;
	u32 link_id;
	u8 peer_id[16];
	char peer_id_string[NODE_ID_STR_LEN];
	struct list_head publ_list;
	struct list_head conn_sks;
	unsigned long keepalive_intv;
	struct timer_list timer;
	struct rcu_head rcu;
	unsigned long delete_at;
	struct net *peer_net;
	u32 peer_hash_mix;
#ifdef CONFIG_TIPC_CRYPTO
	struct tipc_crypto *crypto_rx;
#endif
};
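/* Usage sketch (editor's illustration, not part of the original file):
 * lookups return a node with its kref held, n->lock guards the link
 * entries, and the reference must be dropped when done:
 *
 *	struct tipc_node *n = tipc_node_find(net, addr);
 *
 *	if (n) {
 *		tipc_node_read_lock(n);
 *		(inspect n->links[], n->state, ...)
 *		tipc_node_read_unlock(n);
 *		tipc_node_put(n);
 *	}
 */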
/* Node FSM states and events:
 */
enum {
	SELF_DOWN_PEER_DOWN    = 0xdd,
	SELF_UP_PEER_UP        = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING    = 0xac,
	SELF_COMING_PEER_UP    = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER       = 0xf0,
	NODE_SYNCHING          = 0xcc
};

enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT   = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT   = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT   = 0xfee,
	NODE_SYNCH_BEGIN_EVT    = 0xcbe,
	NODE_SYNCH_END_EVT      = 0xcee
};
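/* Editorial aside (not in the original file): the state values appear to be
 * chosen as mnemonic hex encodings of the self/peer status rather than as
 * arbitrary constants, e.g. SELF_DOWN_PEER_DOWN = 0xdd ("down/down"),
 * SELF_UP_PEER_UP = 0xaa ("up/up") and SELF_UP_PEER_COMING = 0xac
 * ("up/coming"), which makes states recognizable in trace output.
 */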
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr);
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
				bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
static bool node_is_up(struct tipc_node *n);
static void tipc_node_delete_from_list(struct tipc_node *node);

struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};

static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
	int bearer_id = n->active_links[sel & 1];

	if (unlikely(bearer_id == INVALID_BEARER_ID))
		return NULL;

	return n->links[bearer_id].link;
}

int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
{
	struct tipc_node *n;
	int bearer_id;
	unsigned int mtu = MAX_MSG_SIZE;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return mtu;

	/* Allow MAX_MSG_SIZE when building connection oriented message
	 * if they are in the same core network
	 */
	if (n->peer_net && connected) {
		tipc_node_put(n);
		return mtu;
	}

	bearer_id = n->active_links[sel & 1];
	if (likely(bearer_id != INVALID_BEARER_ID))
		mtu = n->links[bearer_id].mtu;
	tipc_node_put(n);
	return mtu;
}

bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
{
	u8 *own_id = tipc_own_id(net);
	struct tipc_node *n;

	if (!own_id)
		return true;

	if (addr == tipc_own_addr(net)) {
		memcpy(id, own_id, TIPC_NODEID_LEN);
		return true;
	}
	n = tipc_node_find(net, addr);
	if (!n)
		return false;

	memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
	tipc_node_put(n);
	return true;
}

u16 tipc_node_get_capabilities(struct net *net, u32 addr)
{
	struct tipc_node *n;
	u16 caps;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return TIPC_NODE_CAPABILITIES;
	caps = n->capabilities;
	tipc_node_put(n);
	return caps;
}

u32 tipc_node_get_addr(struct tipc_node *node)
{
	return (node) ? node->addr : 0;
}

char *tipc_node_get_id_str(struct tipc_node *node)
{
	return node->peer_id_string;
}

#ifdef CONFIG_TIPC_CRYPTO
/**
 * tipc_node_crypto_rx - Retrieve crypto RX handle from node
 * @__n: target tipc_node
 * Note: node ref counter must be held first!
 */
struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
{
	return (__n) ? __n->crypto_rx : NULL;
}

struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
{
	return container_of(pos, struct tipc_node, list)->crypto_rx;
}

struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	n = tipc_node_find(net, addr);
	return (n) ? n->crypto_rx : NULL;
}
#endif

static void tipc_node_free(struct rcu_head *rp)
{
	struct tipc_node *n = container_of(rp, struct tipc_node, rcu);

#ifdef CONFIG_TIPC_CRYPTO
	tipc_crypto_stop(&n->crypto_rx);
#endif
	kfree(n);
}

static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *n = container_of(kref, struct tipc_node, kref);

	kfree(n->bc_entry.link);
	call_rcu(&n->rcu, tipc_node_free);
}

void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node;
	unsigned int thash = tipc_hashfn(addr);

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
		if (node->addr != addr || node->preliminary)
			continue;
		if (!kref_get_unless_zero(&node->kref))
			node = NULL;
		break;
	}
	rcu_read_unlock();
	return node;
}

/* tipc_node_find_by_id - locate specified node object by its 128-bit id
 * Note: this function is called only when a discovery request failed
 * to find the node by its 32-bit id, and is not time critical
 */
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		read_lock_bh(&n->lock);
		if (!memcmp(id, n->peer_id, 16) &&
		    kref_get_unless_zero(&n->kref))
			found = true;
		read_unlock_bh(&n->lock);
		if (found)
			break;
	}
	rcu_read_unlock();
	return found ? n : NULL;
}
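/* A note on teardown ordering (editor's summary of the code above):
 * dropping the last reference runs tipc_node_kref_release(), which frees
 * the broadcast receive link synchronously but defers freeing the node
 * itself, and stopping its RX crypto, to an RCU grace period via
 * call_rcu(&n->rcu, tipc_node_free). This is what allows the lockless
 * RCU walks in tipc_node_find() and tipc_node_find_by_id() to use
 * kref_get_unless_zero() safely on the node objects they traverse.
 *
 * The helpers below wrap n->lock; note that tipc_node_write_unlock()
 * additionally dispatches any notifications flagged while the lock was
 * held, and must run them only after releasing it.
 */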
static void tipc_node_read_lock(struct tipc_node *n)
	__acquires(n->lock)
{
	read_lock_bh(&n->lock);
}

static void tipc_node_read_unlock(struct tipc_node *n)
	__releases(n->lock)
{
	read_unlock_bh(&n->lock);
}

static void tipc_node_write_lock(struct tipc_node *n)
	__acquires(n->lock)
{
	write_lock_bh(&n->lock);
}

static void tipc_node_write_unlock_fast(struct tipc_node *n)
	__releases(n->lock)
{
	write_unlock_bh(&n->lock);
}

static void tipc_node_write_unlock(struct tipc_node *n)
	__releases(n->lock)
{
	struct tipc_socket_addr sk;
	struct net *net = n->net;
	u32 flags = n->action_flags;
	struct list_head *publ_list;
	struct tipc_uaddr ua;
	u32 bearer_id, node;

	if (likely(!flags)) {
		write_unlock_bh(&n->lock);
		return;
	}

	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
		   TIPC_LINK_STATE, n->addr, n->addr);
	sk.ref = n->link_id;
	sk.node = tipc_own_addr(net);
	node = n->addr;
	bearer_id = n->link_id & 0xffff;
	publ_list = &n->publ_list;

	n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
			     TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

	write_unlock_bh(&n->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, node, n->capabilities);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, node, n->capabilities);

	if (flags & TIPC_NOTIFY_LINK_UP) {
		tipc_mon_peer_up(net, node, bearer_id);
		tipc_nametbl_publish(net, &ua, &sk, sk.ref);
	}
	if (flags & TIPC_NOTIFY_LINK_DOWN) {
		tipc_mon_peer_down(net, node, bearer_id);
		tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
	}
}

static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
{
	int net_id = tipc_netid(n->net);
	struct tipc_net *tn_peer;
	struct net *tmp;
	u32 hash_chk;

	if (n->peer_net)
		return;

	for_each_net_rcu(tmp) {
		tn_peer = tipc_net(tmp);
		if (!tn_peer)
			continue;
		/* Integrity checking whether node exists in namespace or not */
		if (tn_peer->net_id != net_id)
			continue;
		if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
			continue;
		hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
		if (hash_mixes ^ hash_chk)
			continue;
		n->peer_net = tmp;
		n->peer_hash_mix = hash_mixes;
		break;
	}
}
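/* Editor's overview (not in the original file): tipc_node_create() below is
 * the single point where peer nodes materialize, normally driven by the
 * discovery code (see tipc_node_check_dest()). Under tn->node_list_lock it
 * either refreshes an already known node, promotes a "preliminary" node,
 * re-hashing it under its final 32-bit address, or allocates a new node
 * and inserts it into both the address hash table and the sorted cluster
 * list.
 */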
struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
				   u16 capabilities, u32 hash_mixes,
				   bool preliminary)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l, *snd_l = tipc_bc_sndlink(net);
	struct tipc_node *n, *temp_node;
	unsigned long intv;
	int bearer_id;
	int i;

	spin_lock_bh(&tn->node_list_lock);
	n = tipc_node_find(net, addr) ?:
		tipc_node_find_by_id(net, peer_id);
	if (n) {
		if (!n->preliminary)
			goto update;
		if (preliminary)
			goto exit;
		/* A preliminary node becomes "real" now, refresh its data */
		tipc_node_write_lock(n);
		if (!tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
					 tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
					 n->capabilities, &n->bc_entry.inputq1,
					 &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
			pr_warn("Broadcast rcv link refresh failed, no memory\n");
			tipc_node_write_unlock_fast(n);
			tipc_node_put(n);
			n = NULL;
			goto exit;
		}
		n->preliminary = false;
		n->addr = addr;
		hlist_del_rcu(&n->hash);
		hlist_add_head_rcu(&n->hash,
				   &tn->node_htable[tipc_hashfn(addr)]);
		list_del_rcu(&n->list);
		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
			if (n->addr < temp_node->addr)
				break;
		}
		list_add_tail_rcu(&n->list, &temp_node->list);
		tipc_node_write_unlock_fast(n);

update:
		if (n->peer_hash_mix ^ hash_mixes)
			tipc_node_assign_peer_net(n, hash_mixes);
		if (n->capabilities == capabilities)
			goto exit;
		/* Same node may come back with new capabilities */
		tipc_node_write_lock(n);
		n->capabilities = capabilities;
		for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
			l = n->links[bearer_id].link;
			if (l)
				tipc_link_update_caps(l, capabilities);
		}
		tipc_node_write_unlock_fast(n);

		/* Calculate cluster capabilities */
		tn->capabilities = TIPC_NODE_CAPABILITIES;
		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
			tn->capabilities &= temp_node->capabilities;
		}

		tipc_bcast_toggle_rcast(net,
					(tn->capabilities & TIPC_BCAST_RCAST));

		goto exit;
	}
	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	tipc_nodeid2string(n->peer_id_string, peer_id);
#ifdef CONFIG_TIPC_CRYPTO
	if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
		pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
		kfree(n);
		n = NULL;
		goto exit;
	}
#endif
	n->addr = addr;
	n->preliminary = preliminary;
	memcpy(&n->peer_id, peer_id, 16);
	n->net = net;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;
	/* Assign kernel local namespace if exists */
	tipc_node_assign_peer_net(n, hash_mixes);
	n->capabilities = capabilities;
	kref_init(&n->kref);
	rwlock_init(&n->lock);
	INIT_HLIST_NODE(&n->hash);
	INIT_LIST_HEAD(&n->list);
	INIT_LIST_HEAD(&n->publ_list);
	INIT_LIST_HEAD(&n->conn_sks);
	skb_queue_head_init(&n->bc_entry.namedq);
	skb_queue_head_init(&n->bc_entry.inputq1);
	__skb_queue_head_init(&n->bc_entry.arrvq);
	skb_queue_head_init(&n->bc_entry.inputq2);
	for (i = 0; i < MAX_BEARERS; i++)
		spin_lock_init(&n->links[i].lock);
	n->state = SELF_DOWN_PEER_LEAVING;
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	n->signature = INVALID_NODE_SIG;
	n->active_links[0] = INVALID_BEARER_ID;
	n->active_links[1] = INVALID_BEARER_ID;
	if (!preliminary &&
	    !tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
				 tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
				 n->capabilities, &n->bc_entry.inputq1,
				 &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
		pr_warn("Broadcast rcv link creation failed, no memory\n");
		tipc_node_put(n);
		n = NULL;
		goto exit;
	}
	tipc_node_get(n);
	timer_setup(&n->timer, tipc_node_timeout, 0);
	/* Start a slow timer anyway, crypto needs it */
	n->keepalive_intv = 10000;
	intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
	if (!mod_timer(&n->timer, intv))
		tipc_node_get(n);
	hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n->list, &temp_node->list);
	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
	trace_tipc_node_create(n, true, " ");
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n;
}
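/* Worked example (editor's illustration) for the timer arithmetic below:
 * with a single link of tolerance 1500 ms, intv = min(500, 1500 / 4) =
 * 375 ms and the abort limit becomes 1500 / 375 = 4 unanswered probes;
 * with a 60000 ms tolerance the interval is capped at 500 ms and the
 * abort limit becomes 120. The link with the lowest tolerance thus
 * drives the node's keepalive timer.
 */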
static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = tipc_link_tolerance(l);
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	/* Link with lowest tolerance determines timer interval */
	if (intv < n->keepalive_intv)
		n->keepalive_intv = intv;

	/* Ensure link's abort limit corresponds to current tolerance */
	tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}

static void tipc_node_delete_from_list(struct tipc_node *node)
{
#ifdef CONFIG_TIPC_CRYPTO
	tipc_crypto_key_flush(node->crypto_rx);
#endif
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	tipc_node_put(node);
}

static void tipc_node_delete(struct tipc_node *node)
{
	trace_tipc_node_delete(node, true, " ");
	tipc_node_delete_from_list(node);

	del_timer_sync(&node->timer);
	tipc_node_put(node);
}

void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
		tipc_node_delete(node);
	spin_unlock_bh(&tn->node_list_lock);
}

void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_add_tail(subscr, &n->publ_list);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_del_init(subscr);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_write_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_write_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_write_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_write_unlock(node);
	tipc_node_put(node);
}
static void tipc_node_clear_links(struct tipc_node *node)
{
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link_entry *le = &node->links[i];

		if (le->link) {
			kfree(le->link);
			le->link = NULL;
			node->link_cnt--;
		}
	}
}

/* tipc_node_cleanup - delete nodes that do not have
 * any active links for NODE_CLEANUP_AFTER time
 */
static bool tipc_node_cleanup(struct tipc_node *peer)
{
	struct tipc_node *temp_node;
	struct tipc_net *tn = tipc_net(peer->net);
	bool deleted = false;

	/* If lock held by tipc_node_stop() the node will be deleted anyway */
	if (!spin_trylock_bh(&tn->node_list_lock))
		return false;

	tipc_node_write_lock(peer);

	if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
		tipc_node_clear_links(peer);
		tipc_node_delete_from_list(peer);
		deleted = true;
	}
	tipc_node_write_unlock(peer);

	if (!deleted) {
		spin_unlock_bh(&tn->node_list_lock);
		return deleted;
	}

	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(peer->net,
				(tn->capabilities & TIPC_BCAST_RCAST));
	spin_unlock_bh(&tn->node_list_lock);
	return deleted;
}

/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(struct timer_list *t)
{
	struct tipc_node *n = from_timer(n, t, timer);
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int remains = n->link_cnt;
	int bearer_id;
	int rc = 0;

	trace_tipc_node_timeout(n, false, " ");
	if (!node_is_up(n) && tipc_node_cleanup(n)) {
		/* Drop the reference held by the timer */
		tipc_node_put(n);
		return;
	}

#ifdef CONFIG_TIPC_CRYPTO
	/* Take any crypto key related actions first */
	tipc_crypto_timeout(n->crypto_rx);
#endif
	__skb_queue_head_init(&xmitq);

	/* Reset the node interval to a large value (10 seconds); it is then
	 * recalculated below from the lowest link tolerance
	 */
	tipc_node_read_lock(n);
	n->keepalive_intv = 10000;
	tipc_node_read_unlock(n);
	for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		if (le->link) {
			spin_lock_bh(&le->lock);
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
			spin_unlock_bh(&le->lock);
			remains--;
		}
		tipc_node_read_unlock(n);
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}
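/* On the active-link slots used below (editor's summary): active_links[0]
 * and [1] hold bearer ids and are indexed by the low bit of a message
 * selector. A lone active link occupies both slots; two links of equal
 * priority take one slot each, load-sharing flows by selector bit; a
 * higher-priority link takes both slots and demotes the other link to
 * standby. See also node_active_link() and tipc_node_xmit().
 */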
/**
 * __tipc_node_link_up - handle addition of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer
 * @xmitq: queue for messages to be xmited on
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
				struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	if (!nl || tipc_link_is_up(nl))
		return;

	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
	if (!tipc_link_is_up(nl))
		return;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = tipc_link_id(nl);

	/* Leave room for tunnel header when returning 'mtu' to users: */
	n->links[bearer_id].mtu = tipc_link_mss(nl);

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

	pr_debug("Established link <%s> on network plane %c\n",
		 tipc_link_name(nl), tipc_link_plane(nl));
	trace_tipc_node_link_up(n, true, " ");

	/* Ensure that a STATE message goes first */
	tipc_link_build_state_msg(nl, xmitq);

	/* First link? => give it both slots */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
		n->action_flags |= TIPC_NOTIFY_NODE_UP;
		tipc_link_set_active(nl, true);
		tipc_bcast_add_peer(n->net, nl, xmitq);
		return;
	}

	/* Second link => redistribute slots */
	if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
		pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_link_set_active(nl, true);
		tipc_link_set_active(ol, false);
	} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
		tipc_link_set_active(nl, true);
		*slot1 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
	}

	/* Prepare synchronization with first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}

/**
 * tipc_node_link_up - handle addition of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer
 * @xmitq: queue for messages to be xmited on
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	struct tipc_media_addr *maddr;

	tipc_node_write_lock(n);
	__tipc_node_link_up(n, bearer_id, xmitq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
	tipc_node_write_unlock(n);
}
/**
 * tipc_node_link_failover() - start failover in case of "half-failover"
 *
 * This function is only called in a very special situation where link
 * failover can already be started on the peer node but not on this node.
 * This can happen when e.g.::
 *
 *	1. Both links <1A-2A>, <1B-2B> down
 *	2. Link endpoint 2A up, but 1A still down (e.g. due to network
 *	   disturbance, wrong session, etc.)
 *	3. Link <1B-2B> up
 *	4. Link endpoint 2A down (e.g. due to link tolerance timeout)
 *	5. Node 2 starts failover onto link <1B-2B>
 *
 *	==> Node 1 never starts link/node failover!
 *
 * @n: tipc node structure
 * @l: link peer endpoint failing over (may be NULL)
 * @tnl: tunnel link
 * @xmitq: queue for messages to be xmited on tnl link later
 */
static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
				    struct tipc_link *tnl,
				    struct sk_buff_head *xmitq)
{
	/* Avoid a "self-failover" that can never end */
	if (!tipc_link_is_up(tnl))
		return;

	/* Don't rush, failure link may be in the process of resetting */
	if (l && !tipc_link_is_reset(l))
		return;

	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);

	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_failover_prepare(l, tnl, xmitq);

	if (l)
		tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
}

/**
 * __tipc_node_link_down - handle loss of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer
 * @xmitq: queue for messages to be xmited on
 * @maddr: output media address of the bearer
 */
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr)
{
	struct tipc_link_entry *le = &n->links[*bearer_id];
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0, prio;
	struct tipc_link *l, *_l, *tnl;

	l = n->links[*bearer_id].link;
	if (!l || tipc_link_is_reset(l))
		return;

	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = tipc_link_id(l);

	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 tipc_link_name(l), tipc_link_plane(l));

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		prio = tipc_link_prio(_l);
		if (prio < highest)
			continue;
		if (prio > highest) {
			highest = prio;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	if (!node_is_up(n)) {
		if (tipc_link_peer_is_down(l))
			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		tipc_link_reset(l);
		tipc_link_build_reset_msg(l, xmitq);
		*maddr = &n->links[*bearer_id].maddr;
		node_lost_contact(n, &le->inputq);
		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
		return;
	}
	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);

	/* There is still a working link => initiate failover */
	*bearer_id = n->active_links[0];
	tnl = n->links[*bearer_id].link;
	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
	tipc_link_reset(l);
	tipc_link_fsm_evt(l, LINK_RESET_EVT);
	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
	*maddr = &n->links[*bearer_id].maddr;
}
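/* Editorial aside on the sync_point arithmetic above: link sequence
 * numbers are 16 bits wide and compared modulo 2^16 (less()/more() in
 * msg.h), so rcv_nxt + (U16_MAX / 2 - 1) is in effect the most distant
 * point still "ahead" of rcv_nxt. It acts as a conservative initial synch
 * point; arriving FAILOVER_MSGs later lower it to the real value in
 * tipc_node_check_state() ("use lowest calculated syncpt").
 */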
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
				bool delete)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct tipc_media_addr *maddr = NULL;
	struct tipc_link *l = le->link;
	int old_bearer_id = bearer_id;
	struct sk_buff_head xmitq;

	if (!l)
		return;

	__skb_queue_head_init(&xmitq);

	tipc_node_write_lock(n);
	if (!tipc_link_is_establishing(l)) {
		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
	} else {
		/* Defuse pending tipc_node_link_up() */
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
	}
	if (delete) {
		kfree(l);
		le->link = NULL;
		n->link_cnt--;
	}
	trace_tipc_node_link_down(n, true, "node link down or deleted!");
	tipc_node_write_unlock(n);
	if (delete)
		tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
	tipc_sk_rcv(n->net, &le->inputq);
}

static bool node_is_up(struct tipc_node *n)
{
	return n->active_links[0] != INVALID_BEARER_ID;
}

bool tipc_node_is_up(struct net *net, u32 addr)
{
	struct tipc_node *n;
	bool retval = false;

	if (in_own_node(net, addr))
		return true;

	n = tipc_node_find(net, addr);
	if (!n)
		return false;
	retval = node_is_up(n);
	tipc_node_put(n);
	return retval;
}

static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	addr ^= tipc_net(net)->random;
	while ((n = tipc_node_find(net, addr))) {
		tipc_node_put(n);
		addr++;
	}
	return addr;
}

/* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
 * Returns suggested address if any, otherwise 0
 */
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool preliminary;
	u32 sugg_addr;

	/* Suggest new address if some other peer is using this one */
	n = tipc_node_find(net, addr);
	if (n) {
		if (!memcmp(n->peer_id, id, NODE_ID_LEN))
			addr = 0;
		tipc_node_put(n);
		if (!addr)
			return 0;
		return tipc_node_suggest_addr(net, addr);
	}

	/* Suggest previously used address if peer is known */
	n = tipc_node_find_by_id(net, id);
	if (n) {
		sugg_addr = n->addr;
		preliminary = n->preliminary;
		tipc_node_put(n);
		if (!preliminary)
			return sugg_addr;
	}

	/* Even this node may be in conflict */
	if (tn->trial_addr == addr)
		return tipc_node_suggest_addr(net, addr);

	return 0;
}
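/* Editor's note on the address suggestion scheme above:
 * tipc_node_suggest_addr() first perturbs the conflicting address with the
 * per-net random value, then probes linearly (addr, addr + 1, ...) until
 * tipc_node_find() misses, i.e. until an address with no node object bound
 * to it is found. The random offset presumably keeps nodes that boot with
 * identical preferred addresses from chasing each other through the same
 * candidate sequence.
 */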
void tipc_node_check_dest(struct net *net, u32 addr,
			  u8 *peer_id, struct tipc_bearer *b,
			  u16 capabilities, u32 signature, u32 hash_mixes,
			  struct tipc_media_addr *maddr,
			  bool *respond, bool *dupl_addr)
{
	struct tipc_node *n;
	struct tipc_link *l;
	struct tipc_link_entry *le;
	bool addr_match = false;
	bool sign_match = false;
	bool link_up = false;
	bool link_is_reset = false;
	bool accept_addr = false;
	bool reset = false;
	char *if_name;
	unsigned long intv;
	u16 session;

	*dupl_addr = false;
	*respond = false;

	n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
			     false);
	if (!n)
		return;

	tipc_node_write_lock(n);

	le = &n->links[b->identity];

	/* Prepare to validate requesting node's signature and media address */
	l = le->link;
	link_up = l && tipc_link_is_up(l);
	link_is_reset = l && tipc_link_is_reset(l);
	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
	sign_match = (signature == n->signature);

	/* These three flags give us eight permutations: */

	if (sign_match && addr_match && link_up) {
		/* All is fine. Ignore requests. */
		/* Peer node is not a container/local namespace */
		if (!n->peer_hash_mix)
			n->peer_hash_mix = hash_mixes;
	} else if (sign_match && addr_match && !link_up) {
		/* Respond. The link will come up in due time */
		*respond = true;
	} else if (sign_match && !addr_match && link_up) {
		/* Peer has changed i/f address without rebooting.
		 * If so, the link will reset soon, and the next
		 * discovery will be accepted. So we can ignore it.
		 * It may also be a cloned or malicious peer having
		 * chosen the same node address and signature as an
		 * existing one.
		 * Ignore requests until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (sign_match && !addr_match && !link_up) {
		/* Peer link has changed i/f address without rebooting.
		 * It may also be a cloned or malicious peer; we can't
		 * distinguish between the two.
		 * The signature is correct, so we must accept.
		 */
		accept_addr = true;
		*respond = true;
		reset = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Peer node rebooted. Two possibilities:
		 *  - Delayed re-discovery; this link endpoint has already
		 *    reset and re-established contact with the peer, before
		 *    receiving a discovery message from that node.
		 *    (The peer happened to receive one from this node first).
		 *  - The peer came back so fast that our side has not
		 *    discovered it yet. Probing from this side will soon
		 *    reset the link, since there can be no working link
		 *    endpoint at the peer end, and the link will re-establish.
		 *    Accept the signature, since it comes from a known peer.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/* The peer node has rebooted.
		 * Accept signature, since it is a known peer.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Peer rebooted with new address, or a new/duplicate peer.
		 * Ignore until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with new address, or it is a new peer.
		 * Accept signature and address.
		 */
		n->signature = signature;
		accept_addr = true;
		*respond = true;
		reset = true;
	}
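	/* Editor's summary of the eight permutations handled above
	 * (sign/addr/up -> resulting actions; "sig" marks cases where
	 * n->signature is also refreshed):
	 *
	 *	sign addr up | respond dupl_addr accept_addr reset
	 *	-------------+---------------------------------------
	 *	  1    1   1 |    -        -          -         -
	 *	  1    1   0 |    y        -          -         -
	 *	  1    0   1 |    -        y          -         -
	 *	  1    0   0 |    y        -          y         y
	 *	  0    1   1 |    -        -          -         -   sig
	 *	  0    1   0 |    y        -          -         -   sig
	 *	  0    0   1 |    -        y          -         -
	 *	  0    0   0 |    y        -          y         y   sig
	 */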
	if (!accept_addr)
		goto exit;

	/* Now create new link if not already existing */
	if (!l) {
		if (n->link_cnt == 2)
			goto exit;

		if_name = strchr(b->name, ':') + 1;
		get_random_bytes(&session, sizeof(u16));
		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
				      b->net_plane, b->mtu, b->priority,
				      b->min_win, b->max_win, session,
				      tipc_own_addr(net), addr, peer_id,
				      n->capabilities,
				      tipc_bc_sndlink(n->net), n->bc_entry.link,
				      &le->inputq,
				      &n->bc_entry.namedq, &l)) {
			*respond = false;
			goto exit;
		}
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		if (n->state == NODE_FAILINGOVER)
			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
		link_is_reset = tipc_link_is_reset(l);
		le->link = l;
		n->link_cnt++;
		tipc_node_calculate_timer(n, l);
		if (n->link_cnt == 1) {
			intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
			if (!mod_timer(&n->timer, intv))
				tipc_node_get(n);
		}
	}
	memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
	tipc_node_write_unlock(n);
	if (reset && !link_is_reset)
		tipc_node_link_down(n, b->identity, false);
	tipc_node_put(n);
}

void tipc_node_delete_links(struct net *net, int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_link_down(n, bearer_id, true);
	}
	rcu_read_unlock();
}

static void tipc_node_reset_links(struct tipc_node *n)
{
	int i;

	pr_warn("Resetting all links to %x\n", n->addr);

	trace_tipc_node_reset_links(n, true, " ");
	for (i = 0; i < MAX_BEARERS; i++) {
		tipc_node_link_down(n, i, false);
	}
}
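/* Example traces through the FSM below (editor's illustration):
 * normal bring-up:
 *	SELF_DOWN_PEER_DOWN --SELF_ESTABL_CONTACT_EVT--> SELF_UP_PEER_COMING
 *	                    --PEER_ESTABL_CONTACT_EVT--> SELF_UP_PEER_UP
 * graceful peer loss:
 *	SELF_UP_PEER_UP     --PEER_LOST_CONTACT_EVT--->  SELF_LEAVING_PEER_DOWN
 *	                    --SELF_LOST_CONTACT_EVT--->  SELF_DOWN_PEER_DOWN
 * Events that make no sense in the current state (e.g. NODE_SYNCH_BEGIN_EVT
 * while SELF_DOWN_PEER_DOWN) are logged as illegal and otherwise ignored.
 */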
/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
	n->state = state;
	return;

illegal_evt:
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
}
static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %x\n", n->addr);
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	trace_tipc_node_lost_contact(n, true, " ");

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);
	skb_queue_purge(&n->bc_entry.namedq);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;
	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}

/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of @linkname output buffer
 *
 * Return: 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_read_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		strncpy(linkname, tipc_link_name(link), len);
		err = 0;
	}
	tipc_node_read_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
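/* Editor's note: the message built by __tipc_nl_add_node() above nests one
 * TIPC_NLA_NODE attribute per node:
 *
 *	TIPC_NLA_NODE
 *	  TIPC_NLA_NODE_ADDR	(u32: the node's 32-bit address)
 *	  TIPC_NLA_NODE_UP	(flag: present only while the node is up)
 *
 * which is presumably what user-space node-list dumps decode on the
 * other end.
 */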
static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	struct sk_buff_head inputq;

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (msg_connected(hdr) || msg_named(hdr) ||
		    msg_direct(hdr)) {
			tipc_loopback_trace(peer_net, list);
			spin_lock_init(&list->lock);
			tipc_sk_rcv(peer_net, list);
			return;
		}
		if (msg_mcast(hdr)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
			return;
		}
		return;
	case MSG_FRAGMENTER:
		if (tipc_msg_assemble(list)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
		}
		return;
	case GROUP_PROTOCOL:
	case CONN_MANAGER:
		tipc_loopback_trace(peer_net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(peer_net, list);
		return;
	case LINK_PROTOCOL:
	case NAME_DISTRIBUTOR:
	case TUNNEL_PROTOCOL:
	case BCAST_PROTOCOL:
		return;
	default:
		return;
	}
}

/**
 * tipc_node_xmit() - general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain.
 * Return: 0 on success, otherwise: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE or
 * -ENOBUFS
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link_entry *le = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	bool node_up = false;
	struct net *peer_net;
	int bearer_id;
	int rc;

	if (in_own_node(net, dnode)) {
		tipc_loopback_trace(net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(net, list);
		return 0;
	}

	n = tipc_node_find(net, dnode);
	if (unlikely(!n)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	rcu_read_lock();
	tipc_node_read_lock(n);
	node_up = node_is_up(n);
	peer_net = n->peer_net;
	tipc_node_read_unlock(n);
	if (node_up && peer_net && check_net(peer_net)) {
		/* xmit inner linux container */
		tipc_lxc_xmit(peer_net, list);
		if (likely(skb_queue_empty(list))) {
			rcu_read_unlock();
			tipc_node_put(n);
			return 0;
		}
	}
	rcu_read_unlock();

	tipc_node_read_lock(n);
	bearer_id = n->active_links[selector & 1];
	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
		tipc_node_read_unlock(n);
		tipc_node_put(n);
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	__skb_queue_head_init(&xmitq);
	le = &n->links[bearer_id];
	spin_lock_bh(&le->lock);
	rc = tipc_link_xmit(le->link, list, &xmitq);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(n);

	if (unlikely(rc == -ENOBUFS))
		tipc_node_link_down(n, bearer_id, false);
	else
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

	tipc_node_put(n);

	return rc;
}
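/* Editor's note on 'selector' above: callers typically pass a per-flow
 * value such as the originating port (see tipc_node_distr_xmit() below);
 * only its low bit is used, via active_links[selector & 1]. That pins any
 * given flow to one link, preserving per-flow ordering, while different
 * flows spread across two equal-priority links.
 */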
/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	__skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	tipc_node_xmit(net, &head, dnode, selector);
	return 0;
}

/* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
 */
int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	u32 selector, dnode;

	while ((skb = __skb_dequeue(xmitq))) {
		selector = msg_origport(buf_msg(skb));
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, selector);
	}
	return 0;
}

void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests)
{
	struct sk_buff_head xmitq;
	struct sk_buff *txskb;
	struct tipc_node *n;
	u16 dummy;
	u32 dst;

	/* Use broadcast if all nodes support it */
	if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) {
		__skb_queue_head_init(&xmitq);
		__skb_queue_tail(&xmitq, skb);
		tipc_bcast_xmit(net, &xmitq, &dummy);
		return;
	}

	/* Otherwise use legacy replicast method */
	rcu_read_lock();
	list_for_each_entry_rcu(n, tipc_nodes(net), list) {
		dst = n->addr;
		if (in_own_node(net, dst))
			continue;
		if (!node_is_up(n))
			continue;
		txskb = pskb_copy(skb, GFP_ATOMIC);
		if (!txskb)
			break;
		msg_set_destnode(buf_msg(txskb), dst);
		tipc_node_xmit_skb(net, txskb, dst, 0);
	}
	rcu_read_unlock();
	kfree_skb(skb);
}
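/* Editor's summary of the multicast receive path below: link-level
 * delivery fills bc_entry.inputq1; tipc_node_mcast_rcv() splices inputq1
 * into the arrival queue arrvq, taking inputq2's lock first since arrvq
 * is documented as protected by that lock; tipc_sk_mcast_rcv() then
 * drains arrvq into inputq2 and on to the destination sockets.
 */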
static void tipc_node_mcast_rcv(struct tipc_node *n)
{
	struct tipc_bclink_entry *be = &n->bc_entry;

	/* 'arrvq' is under inputq2's lock protection */
	spin_lock_bh(&be->inputq2.lock);
	spin_lock_bh(&be->inputq1.lock);
	skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
	spin_unlock_bh(&be->inputq1.lock);
	spin_unlock_bh(&be->inputq2.lock);
	tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
}

static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_link *ucl;
	int rc;

	rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);

	if (rc & TIPC_LINK_DOWN_EVT) {
		tipc_node_reset_links(n);
		return;
	}

	if (!(rc & TIPC_LINK_SND_STATE))
		return;

	/* If probe message, a STATE response will be sent anyway */
	if (msg_probe(hdr))
		return;

	/* Produce a STATE message carrying broadcast NACK */
	tipc_node_read_lock(n);
	ucl = n->links[bearer_id].link;
	if (ucl)
		tipc_link_build_state_msg(ucl, xmitq);
	tipc_node_read_unlock(n);
}

/**
 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer_id: id of bearer message arrived on
 *
 * Invoked with no locks held.
 */
static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
{
	int rc;
	struct sk_buff_head xmitq;
	struct tipc_bclink_entry *be;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	u32 dnode = msg_destnode(hdr);
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	/* If NACK for other node, let rcv link for that node peek into it */
	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
		n = tipc_node_find(net, dnode);
	else
		n = tipc_node_find(net, msg_prevnode(hdr));
	if (!n) {
		kfree_skb(skb);
		return;
	}
	be = &n->bc_entry;
	le = &n->links[bearer_id];

	rc = tipc_bcast_rcv(net, be->link, skb);

	/* Broadcast ACKs are sent on a unicast link */
	if (rc & TIPC_LINK_SND_STATE) {
		tipc_node_read_lock(n);
		tipc_link_build_state_msg(le->link, &xmitq);
		tipc_node_read_unlock(n);
	}

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

	if (!skb_queue_empty(&be->inputq1))
		tipc_node_mcast_rcv(n);

	/* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
	if (!skb_queue_empty(&n->bc_entry.namedq))
		tipc_named_rcv(net, &n->bc_entry.namedq,
			       &n->bc_entry.named_rcv_nxt,
			       &n->bc_entry.named_open);

	/* If reassembly or retransmission failure => reset all links to peer */
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_node_reset_links(n);

	tipc_node_put(n);
}
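/* Worked example (editor's illustration) for the synch point logic in
 * tipc_node_check_state() below: a FAILOVER_MSG with original seqno 100
 * announcing exp_pkts = 42 tunnelled packets yields syncpt = 100 + 42 - 1
 * = 141; the failover ends once the tunnel link's rcv_nxt moves beyond
 * 141. All comparisons use the mod-2^16 helpers less()/more(), so
 * wraparound at 65535 is handled transparently.
 */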

/**
 * tipc_node_check_state - check and if necessary update node state
 * @n: target tipc_node
 * @skb: TIPC packet
 * @bearer_id: identity of bearer delivering the packet
 * @xmitq: queue for messages to be transmitted on
 * Return: true if state and msg are ok, otherwise false
 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int mtyp = msg_type(hdr);
	u16 oseqno = msg_seqno(hdr);
	u16 exp_pkts = msg_msgcnt(hdr);
	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
	int state = n->state;
	struct tipc_link *l, *tnl, *pl = NULL;
	struct tipc_media_addr *maddr;
	int pb_id;

	if (trace_tipc_node_check_state_enabled()) {
		trace_tipc_skb_dump(skb, false, "skb for node state check");
		trace_tipc_node_check_state(n, true, " ");
	}
	l = n->links[bearer_id].link;
	if (!l)
		return false;
	rcv_nxt = tipc_link_rcv_nxt(l);

	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
		return true;

	/* Find parallel link, if any */
	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
		if ((pb_id != bearer_id) && n->links[pb_id].link) {
			pl = n->links[pb_id].link;
			break;
		}
	}

	if (!tipc_link_validate_msg(l, hdr)) {
		trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
		return false;
	}

	/* Check and update node accessibility if applicable */
	if (state == SELF_UP_PEER_COMING) {
		if (!tipc_link_is_up(l))
			return true;
		if (!msg_peer_link_is_up(hdr))
			return true;
		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
	}

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_node_is_up(hdr))
			return false;
		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		return true;
	}

	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	/* Ignore duplicate packets */
	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
		return true;

	/* Initiate or update failover mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
		syncpt = oseqno + exp_pkts - 1;
		if (pl && !tipc_link_is_reset(pl)) {
			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
			trace_tipc_node_link_down(n, true,
						  "node link down <- failover!");
			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
							tipc_link_inputq(l));
		}

		/* If the parallel link was already down, and this happened
		 * before the tunnel link came up, node failover was never
		 * started. Ensure that a FAILOVER_MSG is sent to get the peer
		 * out of NODE_FAILINGOVER state; this node must also accept
		 * TUNNEL_MSGs from the peer.
		 */
		if (n->state != NODE_FAILINGOVER)
			tipc_node_link_failover(n, pl, l, xmitq);

		/* If pkts arrive out of order, use lowest calculated syncpt */
		if (less(syncpt, n->sync_point))
			n->sync_point = syncpt;
	}

	/* Open parallel link when tunnel link reaches synch point */
	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
		if (!more(rcv_nxt, n->sync_point))
			return true;
		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
		if (pl)
			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
		return true;
	}

	/* No syncing needed if only one link */
	if (!pl || !tipc_link_is_up(pl))
		return true;

	/* Initiate synch mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
		if (n->capabilities & TIPC_TUNNEL_ENHANCED)
			syncpt = msg_syncpt(hdr);
		else
			syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
		if (!tipc_link_is_up(l))
			__tipc_node_link_up(n, bearer_id, xmitq);
		if (n->state == SELF_UP_PEER_UP) {
			n->sync_point = syncpt;
			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
		}
	}

	/* Open tunnel link when parallel link reaches synch point */
	if (n->state == NODE_SYNCHING) {
		if (tipc_link_is_synching(l)) {
			tnl = l;
		} else {
			tnl = pl;
			pl = l;
		}
		inputq_len = skb_queue_len(tipc_link_inputq(pl));
		dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
		if (more(dlv_nxt, n->sync_point)) {
			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
			return true;
		}
		if (l == pl)
			return true;
		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
			return true;
		if (usr == LINK_PROTOCOL)
			return true;
		return false;
	}
	return true;
}
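
/* Worked example of the delivery point used in the NODE_SYNCHING branch
 * above (assumed numbers): if the parallel link's rcv_nxt is 210 and six
 * packets still sit undelivered in its input queue, then
 *
 *	dlv_nxt = 210 - 6 = 204
 *
 * and synch mode ends only once dlv_nxt has moved past n->sync_point.
 */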

/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
	struct sk_buff_head xmitq;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr;
	struct tipc_node *n;
	int bearer_id = b->identity;
	u32 self = tipc_own_addr(net);
	int usr, rc = 0;
	u16 bc_ack;
#ifdef CONFIG_TIPC_CRYPTO
	struct tipc_ehdr *ehdr;

	/* Check if message must be decrypted first */
	if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
		goto rcv;

	ehdr = (struct tipc_ehdr *)skb->data;
	if (likely(ehdr->user != LINK_CONFIG)) {
		n = tipc_node_find(net, ntohl(ehdr->addr));
		if (unlikely(!n))
			goto discard;
	} else {
		n = tipc_node_find_by_id(net, ehdr->id);
	}
	skb_dst_force(skb);
	tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
	if (!skb)
		return;

rcv:
#endif
	/* Ensure message is well-formed before touching the header */
	if (unlikely(!tipc_msg_validate(&skb)))
		goto discard;
	__skb_queue_head_init(&xmitq);
	hdr = buf_msg(skb);
	usr = msg_user(hdr);
	bc_ack = msg_bcast_ack(hdr);

	/* Handle arrival of discovery or broadcast packet */
	if (unlikely(msg_non_seq(hdr))) {
		if (unlikely(usr == LINK_CONFIG))
			return tipc_disc_rcv(net, skb, b);
		else
			return tipc_node_bc_rcv(net, skb, bearer_id);
	}

	/* Discard unicast link messages destined for another node */
	if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
		goto discard;

	/* Locate neighboring node that sent packet */
	n = tipc_node_find(net, msg_prevnode(hdr));
	if (unlikely(!n))
		goto discard;
	le = &n->links[bearer_id];

	/* Ensure broadcast reception is in synch with peer's send state */
	if (unlikely(usr == LINK_PROTOCOL)) {
		if (unlikely(skb_linearize(skb))) {
			tipc_node_put(n);
			goto discard;
		}
		hdr = buf_msg(skb);
		tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
	} else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) {
		tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
	}

	/* Receive packet directly if conditions permit */
	tipc_node_read_lock(n);
	if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
		spin_lock_bh(&le->lock);
		if (le->link) {
			rc = tipc_link_rcv(le->link, skb, &xmitq);
			skb = NULL;
		}
		spin_unlock_bh(&le->lock);
	}
	tipc_node_read_unlock(n);

	/* Check/update node state before receiving */
	if (unlikely(skb)) {
		if (unlikely(skb_linearize(skb)))
			goto out_node_put;
		tipc_node_write_lock(n);
		if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
			if (le->link) {
				rc = tipc_link_rcv(le->link, skb, &xmitq);
				skb = NULL;
			}
		}
		tipc_node_write_unlock(n);
	}

	if (unlikely(rc & TIPC_LINK_UP_EVT))
		tipc_node_link_up(n, bearer_id, &xmitq);

	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
		tipc_node_link_down(n, bearer_id, false);

	if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
		tipc_named_rcv(net, &n->bc_entry.namedq,
			       &n->bc_entry.named_rcv_nxt,
			       &n->bc_entry.named_open);

	if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
		tipc_node_mcast_rcv(n);

	if (!skb_queue_empty(&le->inputq))
		tipc_sk_rcv(net, &le->inputq);

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

out_node_put:
	tipc_node_put(n);
discard:
	kfree_skb(skb);
}
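
/* Call-site sketch (hypothetical, for illustration): a media driver's
 * receive handler typically feeds every arriving buffer straight into
 * tipc_rcv(), which takes over the skb in all cases:
 *
 *	static void my_media_rcv(struct net *net, struct sk_buff *skb,
 *				 struct tipc_bearer *b)
 *	{
 *		tipc_rcv(net, skb, b);
 *	}
 *
 * "my_media_rcv" is a made-up name; see the L2 receive path in bearer.c
 * for the real equivalent.
 */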

void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
			      int prop)
{
	struct tipc_net *tn = tipc_net(net);
	int bearer_id = b->identity;
	struct sk_buff_head xmitq;
	struct tipc_link_entry *e;
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	rcu_read_lock();

	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_write_lock(n);
		e = &n->links[bearer_id];
		if (e->link) {
			if (prop == TIPC_NLA_PROP_TOL)
				tipc_link_set_tolerance(e->link, b->tolerance,
							&xmitq);
			else if (prop == TIPC_NLA_PROP_MTU)
				tipc_link_set_mtu(e->link, b->mtu);

			/* Update MTU for node link entry */
			e->mtu = tipc_link_mss(e->link);
		}

		tipc_node_write_unlock(n);
		tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
	}

	rcu_read_unlock();
}

int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
	struct tipc_node *peer, *temp_node;
	u8 node_id[NODE_ID_LEN];
	u64 *w0 = (u64 *)&node_id[0];
	u64 *w1 = (u64 *)&node_id[8];
	u32 addr = 0;
	int err;

	/* We identify the peer by its net */
	if (!info->attrs[TIPC_NLA_NET])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
					  info->attrs[TIPC_NLA_NET],
					  tipc_nl_net_policy, info->extack);
	if (err)
		return err;

	/* attrs[TIPC_NLA_NET_NODEID] and attrs[TIPC_NLA_NET_ADDR] are
	 * mutually exclusive cases; if neither is present, addr stays 0
	 * and the request is rejected below
	 */
	if (attrs[TIPC_NLA_NET_ADDR]) {
		addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
		if (!addr)
			return -EINVAL;
	}

	if (attrs[TIPC_NLA_NET_NODEID]) {
		if (!attrs[TIPC_NLA_NET_NODEID_W1])
			return -EINVAL;
		*w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
		*w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
		addr = hash128to32(node_id);
	}

	if (in_own_node(net, addr))
		return -ENOTSUPP;

	spin_lock_bh(&tn->node_list_lock);
	peer = tipc_node_find(net, addr);
	if (!peer) {
		spin_unlock_bh(&tn->node_list_lock);
		return -ENXIO;
	}

	tipc_node_write_lock(peer);
	if (peer->state != SELF_DOWN_PEER_DOWN &&
	    peer->state != SELF_DOWN_PEER_LEAVING) {
		tipc_node_write_unlock(peer);
		err = -EBUSY;
		goto err_out;
	}

	tipc_node_clear_links(peer);
	tipc_node_write_unlock(peer);
	tipc_node_delete(peer);

	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
	err = 0;
err_out:
	tipc_node_put(peer);
	spin_unlock_bh(&tn->node_list_lock);

	return err;
}
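
/* Userspace reaches tipc_nl_peer_rm() through the TIPC_NL_PEER_REMOVE
 * genetlink command; with the iproute2 tipc tool this corresponds to
 * something like (exact syntax depends on the tool version):
 *
 *	# tipc peer remove address 1.1.2
 */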

int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent(),
			 * so setting prev_seq here will cause the consistency
			 * check to fail in the netlink callback handler,
			 * resulting in the NLMSG_DONE message having the
			 * NLM_F_DUMP_INTR flag set if the node state changed
			 * while we released the lock.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		if (node->preliminary)
			continue;
		if (last_addr) {
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_read_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			last_addr = node->addr;
			tipc_node_read_unlock(node);
			goto out;
		}

		tipc_node_read_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}

/* tipc_node_find_by_name - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_node_find_by_name(struct net *net,
						const char *link_name,
						unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l;
	struct tipc_node *n;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_read_lock(n);
		for (i = 0; i < MAX_BEARERS; i++) {
			l = n->links[i].link;
			if (l && !strcmp(tipc_link_name(l), link_name)) {
				*bearer_id = i;
				found_node = n;
				break;
			}
		}
		tipc_node_read_unlock(n);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}
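
/* Lookup sketch (illustrative): TIPC link names encode both endpoints,
 * e.g. "1.1.1:eth0-1.1.2:eth0" (assumed example name), so resolving a
 * link back to its owner node looks like:
 *
 *	unsigned int bid;
 *	struct tipc_node *n;
 *
 *	n = tipc_node_find_by_name(net, "1.1.1:eth0-1.1.2:eth0", &bid);
 *	if (n)
 *		pr_info("link found on bearer %u\n", bid);
 *
 * Note that no reference is taken on the returned node; callers rely on
 * the node list locking conventions used elsewhere in this file.
 */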

int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct sk_buff_head xmitq;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	__skb_queue_head_init(&xmitq);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_node_find_by_name(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_read_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			tipc_link_set_tolerance(link, tol, &xmitq);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			tipc_link_set_prio(link, prio, &xmitq);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 max_win;

			max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link,
						   tipc_link_min_win(link),
						   max_win);
		}
	}

out:
	tipc_node_read_unlock(node);
	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
			 NULL);
	return res;
}

int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg, tipc_net(net)->bcl);
		if (err)
			goto err_free;
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_node_find_by_name(net, name, &bearer_id);
		if (!node) {
			err = -EINVAL;
			goto err_free;
		}

		tipc_node_read_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_read_unlock(node);
			err = -EINVAL;
			goto err_free;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_read_unlock(node);
		if (err)
			goto err_free;
	}

	return genlmsg_reply(msg.skb, info);

err_free:
	nlmsg_free(msg.skb);
	return err;
}

int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	struct tipc_link_entry *le;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	err = -EINVAL;
	if (!strcmp(link_name, tipc_bclink_name)) {
		err = tipc_bclink_reset_stats(net, tipc_bc_sndlink(net));
		if (err)
			return err;
		return 0;
	} else if (strstr(link_name, tipc_bclink_name)) {
		rcu_read_lock();
		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			link = node->bc_entry.link;
			if (link && !strcmp(link_name, tipc_link_name(link))) {
				err = tipc_bclink_reset_stats(net, link);
				tipc_node_read_unlock(node);
				break;
			}
			tipc_node_read_unlock(node);
		}
		rcu_read_unlock();
		return err;
	}

	node = tipc_node_find_by_name(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	le = &node->links[bearer_id];
	tipc_node_read_lock(node);
	spin_lock_bh(&le->lock);
	link = node->links[bearer_id].link;
	if (!link) {
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(node);
		return -EINVAL;
	}
	tipc_link_reset_stats(link);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(node);
	return 0;
}

/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link,
				    bool bc_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}

	if (bc_link) {
		*prev_link = i;
		err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link);
		if (err)
			return err;
	}

	*prev_link = 0;

	return 0;
}
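
/* Worked example of the resume logic in __tipc_nl_add_node_links() above
 * (assumed values): if the dump skb fills up at bearer index 2,
 * *prev_link remains 2 and the next netlink callback re-enters the loop
 * at i = 2, continuing the partially dumped node instead of restarting
 * it; only a fully dumped node resets *prev_link to 0.
 */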

int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	bool bc_link = cb->args[3];
	int err;

	if (done)
		return 0;

	if (!prev_node) {
		/* Check if broadcast-receiver links dumping is needed */
		if (attrs && attrs[TIPC_NLA_LINK]) {
			err = nla_parse_nested_deprecated(link,
							  TIPC_NLA_LINK_MAX,
							  attrs[TIPC_NLA_LINK],
							  tipc_nl_link_policy,
							  NULL);
			if (unlikely(err))
				return err;
			if (unlikely(!link[TIPC_NLA_LINK_BROADCAST]))
				return -EINVAL;
			bc_link = true;
		}
	}

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent(),
			 * so setting prev_seq here will cause the consistency
			 * check to fail in the netlink callback handler,
			 * resulting in the last NLMSG_DONE message having the
			 * NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link, bc_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg, tn->bcl);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link, bc_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;
	cb->args[3] = bc_link;

	return skb->len;
}

int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
	struct net *net = sock_net(skb->sk);
	int err;

	if (!info->attrs[TIPC_NLA_MON])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
					  info->attrs[TIPC_NLA_MON],
					  tipc_nl_monitor_policy,
					  info->extack);
	if (err)
		return err;

	if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
		u32 val;

		val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
		err = tipc_nl_monitor_set_threshold(net, val);
		if (err)
			return err;
	}

	return 0;
}

static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
{
	struct nlattr *attrs;
	void *hdr;
	u32 val;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  0, TIPC_NL_MON_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
	if (!attrs)
		goto msg_full;

	val = tipc_nl_monitor_get_threshold(net);

	if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_nl_msg msg;
	int err;

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	err = __tipc_nl_add_monitor_prop(net, &msg);
	if (err) {
		nlmsg_free(msg.skb);
		return err;
	}

	return genlmsg_reply(msg.skb, info);
}
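
/* Attribute layout sketch: the threshold travels as a single u32
 * (TIPC_NLA_MON_ACTIVATION_THRESHOLD) nested under TIPC_NLA_MON, so a
 * request built with libnl-style helpers would look roughly like
 * (hypothetical userspace code):
 *
 *	struct nlattr *nest = nla_nest_start(msg, TIPC_NLA_MON);
 *
 *	nla_put_u32(msg, TIPC_NLA_MON_ACTIVATION_THRESHOLD, 32);
 *	nla_nest_end(msg, nest);
 */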

int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_bearer = cb->args[0];
	struct tipc_nl_msg msg;
	int bearer_id;
	int err;

	if (prev_bearer == MAX_BEARERS)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
		err = __tipc_nl_add_monitor(net, &msg, bearer_id);
		if (err)
			break;
	}
	rtnl_unlock();
	cb->args[0] = bearer_id;

	return skb->len;
}

int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_node = cb->args[1];
	u32 bearer_id = cb->args[2];
	int done = cb->args[0];
	struct tipc_nl_msg msg;
	int err;

	if (!prev_node) {
		struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
		struct nlattr *mon[TIPC_NLA_MON_MAX + 1];

		if (!attrs[TIPC_NLA_MON])
			return -EINVAL;

		err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
						  attrs[TIPC_NLA_MON],
						  tipc_nl_monitor_policy,
						  NULL);
		if (err)
			return err;

		if (!mon[TIPC_NLA_MON_REF])
			return -EINVAL;

		bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);

		if (bearer_id >= MAX_BEARERS)
			return -EINVAL;
	}

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
	if (!err)
		done = 1;

	rtnl_unlock();
	cb->args[0] = done;
	cb->args[1] = prev_node;
	cb->args[2] = bearer_id;

	return skb->len;
}

#ifdef CONFIG_TIPC_CRYPTO
static int tipc_nl_retrieve_key(struct nlattr **attrs,
				struct tipc_aead_key **pkey)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
	struct tipc_aead_key *key;

	if (!attr)
		return -ENODATA;

	if (nla_len(attr) < sizeof(*key))
		return -EINVAL;
	key = (struct tipc_aead_key *)nla_data(attr);
	if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
	    nla_len(attr) < tipc_aead_key_size(key))
		return -EINVAL;

	*pkey = key;
	return 0;
}

static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];

	if (!attr)
		return -ENODATA;

	if (nla_len(attr) < TIPC_NODEID_LEN)
		return -EINVAL;

	*node_id = (u8 *)nla_data(attr);
	return 0;
}

static int tipc_nl_retrieve_rekeying(struct nlattr **attrs, u32 *intv)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_REKEYING];

	if (!attr)
		return -ENODATA;

	*intv = nla_get_u32(attr);
	return 0;
}
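
/* Size-check example for tipc_nl_retrieve_key() above (numbers assumed):
 * for a 16-byte key the attribute must carry at least
 * tipc_aead_key_size(key) bytes, i.e. the fixed struct tipc_aead_key
 * header plus keylen == 16 bytes of key material; anything shorter is
 * rejected with -EINVAL before the key material is ever read.
 */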

static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx, *c = tx;
	struct tipc_node *n = NULL;
	struct tipc_aead_key *ukey;
	bool rekeying = true, master_key = false;
	u8 *id, *own_id, mode;
	u32 intv = 0;
	int rc = 0;

	if (!info->attrs[TIPC_NLA_NODE])
		return -EINVAL;

	rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
			      info->attrs[TIPC_NLA_NODE],
			      tipc_nl_node_policy, info->extack);
	if (rc)
		return rc;

	own_id = tipc_own_id(net);
	if (!own_id) {
		GENL_SET_ERR_MSG(info, "own node identity not found (set id?)");
		return -EPERM;
	}

	rc = tipc_nl_retrieve_rekeying(attrs, &intv);
	if (rc == -ENODATA)
		rekeying = false;

	rc = tipc_nl_retrieve_key(attrs, &ukey);
	if (rc == -ENODATA && rekeying)
		goto rekeying;
	else if (rc)
		return rc;

	rc = tipc_aead_key_validate(ukey, info);
	if (rc)
		return rc;

	rc = tipc_nl_retrieve_nodeid(attrs, &id);
	switch (rc) {
	case -ENODATA:
		mode = CLUSTER_KEY;
		master_key = !!(attrs[TIPC_NLA_NODE_KEY_MASTER]);
		break;
	case 0:
		mode = PER_NODE_KEY;
		if (memcmp(id, own_id, NODE_ID_LEN)) {
			n = tipc_node_find_by_id(net, id) ?:
				tipc_node_create(net, 0, id, 0xffffu, 0, true);
			if (unlikely(!n))
				return -ENOMEM;
			c = n->crypto_rx;
		}
		break;
	default:
		return rc;
	}

	/* Initiate the TX/RX key */
	rc = tipc_crypto_key_init(c, ukey, mode, master_key);
	if (n)
		tipc_node_put(n);

	if (unlikely(rc < 0)) {
		GENL_SET_ERR_MSG(info, "unable to initiate or attach new key");
		return rc;
	} else if (c == tx) {
		/* Distribute TX key but not master one */
		if (!master_key && tipc_crypto_key_distr(tx, rc, NULL))
			GENL_SET_ERR_MSG(info, "failed to replicate new key");
rekeying:
		/* Schedule TX rekeying if needed */
		tipc_crypto_rekeying_sched(tx, rekeying, intv);
	}

	return 0;
}

int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __tipc_nl_node_set_key(skb, info);
	rtnl_unlock();

	return err;
}

static int __tipc_nl_node_flush_key(struct sk_buff *skb,
				    struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;

	tipc_crypto_key_flush(tn->crypto_tx);
	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list)
		tipc_crypto_key_flush(n->crypto_rx);
	rcu_read_unlock();

	return 0;
}

int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __tipc_nl_node_flush_key(skb, info);
	rtnl_unlock();

	return err;
}
#endif
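
/* The two key handlers above are bound to the TIPC_NL_KEY_SET and
 * TIPC_NL_KEY_FLUSH genetlink commands; the iproute2 tipc tool drives
 * them through its "tipc node set key" and "tipc node flush key"
 * subcommands (exact subcommand syntax depends on the tool version).
 */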

/**
 * tipc_node_dump - dump TIPC node data
 * @n: tipc node to be dumped
 * @more: dump more?
 *        - false: dump only tipc node data
 *        - true: dump node link data as well
 * @buf: buffer where the formatted dump data is written
 */
int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
{
	int i = 0;
	size_t sz = (more) ? NODE_LMAX : NODE_LMIN;

	if (!n) {
		i += scnprintf(buf, sz, "node data: (null)\n");
		return i;
	}

	i += scnprintf(buf, sz, "node data: %x", n->addr);
	i += scnprintf(buf + i, sz - i, " %x", n->state);
	i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
	i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
	i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
	i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
	i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
	i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
	i += scnprintf(buf + i, sz - i, " %u", n->working_links);
	i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
	i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);

	if (!more)
		return i;

	i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
	i += scnprintf(buf + i, sz - i, " media: ");
	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
	i += scnprintf(buf + i, sz - i, "\n");
	i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
	i += scnprintf(buf + i, sz - i, " inputq: ");
	i += tipc_list_dump(&n->links[0].inputq, false, buf + i);

	i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
	i += scnprintf(buf + i, sz - i, " media: ");
	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
	i += scnprintf(buf + i, sz - i, "\n");
	i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
	i += scnprintf(buf + i, sz - i, " inputq: ");
	i += tipc_list_dump(&n->links[1].inputq, false, buf + i);

	i += scnprintf(buf + i, sz - i, "bclink:\n ");
	i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);

	return i;
}

void tipc_node_pre_cleanup_net(struct net *exit_net)
{
	struct tipc_node *n;
	struct tipc_net *tn;
	struct net *tmp;

	rcu_read_lock();
	for_each_net_rcu(tmp) {
		if (tmp == exit_net)
			continue;
		tn = tipc_net(tmp);
		if (!tn)
			continue;
		spin_lock_bh(&tn->node_list_lock);
		list_for_each_entry_rcu(n, &tn->node_list, list) {
			if (!n->peer_net)
				continue;
			if (n->peer_net != exit_net)
				continue;
			tipc_node_write_lock(n);
			n->peer_net = NULL;
			n->peer_hash_mix = 0;
			tipc_node_write_unlock_fast(n);
			break;
		}
		spin_unlock_bh(&tn->node_list_lock);
	}
	rcu_read_unlock();
}