/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "monitor.h"
#include "discover.h"
#include "netlink.h"

#define INVALID_NODE_SIG 0x10000

/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_NOTIFY_LINK_UP: notify link is up
 * TIPC_NOTIFY_LINK_DOWN: notify link is down
 */
enum {
	TIPC_NOTIFY_NODE_DOWN = (1 << 3),
	TIPC_NOTIFY_NODE_UP = (1 << 4),
	TIPC_NOTIFY_LINK_UP = (1 << 6),
	TIPC_NOTIFY_LINK_DOWN = (1 << 7)
};

struct tipc_link_entry {
	struct tipc_link *link;
	spinlock_t lock; /* per link */
	u32 mtu;
	struct sk_buff_head inputq;
	struct tipc_media_addr maddr;
};

struct tipc_bclink_entry {
	struct tipc_link *link;
	struct sk_buff_head inputq1;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq2;
	struct sk_buff_head namedq;
};

/**
 * struct tipc_node - TIPC node structure
 * @addr: network address of node
 * @kref: reference counter to node object
 * @lock: rwlock governing access to structure
 * @net: the applicable net namespace
 * @hash: links to adjacent nodes in unsorted hash chain
 * @inputq: pointer to input queue containing messages for msg event
 * @namedq: pointer to name table input queue with name table messages
 * @active_links: bearer ids of active links, used as index into links[] array
 * @links: array containing references to all links to node
 * @bc_entry: broadcast link entry
 * @action_flags: bit mask of different types of node actions
 * @state: connectivity state vs peer node
 * @sync_point: sequence number where synch/failover is finished
 * @list: links to adjacent nodes in sorted list of cluster's nodes
 * @working_links: number of working links to node (both active and standby)
 * @link_cnt: number of links to node
 * @capabilities: bitmap, indicating peer node's functional capabilities
 * @signature: node instance identifier
 * @link_id: local and remote bearer ids of changing link, if any
 * @peer_id: 128-bit id of peer node
 * @publ_list: list of publications
 * @conn_sks: list of connections (sockets) subscribing to this node
 * @keepalive_intv: keepalive interval in milliseconds
 * @timer: node's keepalive timer
 * @rcu: rcu struct for tipc_node
 */
struct tipc_node {
	u32 addr;
	struct kref kref;
	rwlock_t lock;
	struct net *net;
	struct hlist_node hash;
	int active_links[2];
	struct tipc_link_entry links[MAX_BEARERS];
	struct tipc_bclink_entry bc_entry;
	int action_flags;
	struct list_head list;
	int state;
	u16 sync_point;
	int link_cnt;
	u16 working_links;
	u16 capabilities;
	u32 signature;
	u32 link_id;
	u8 peer_id[16];
	struct list_head publ_list;
	struct list_head conn_sks;
	unsigned long keepalive_intv;
	struct timer_list timer;
	struct rcu_head rcu;
};

/* Node FSM states and events:
 */
enum {
	SELF_DOWN_PEER_DOWN    = 0xdd,
	SELF_UP_PEER_UP        = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING    = 0xac,
	SELF_COMING_PEER_UP    = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER       = 0xf0,
	NODE_SYNCHING          = 0xcc
};

enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT   = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT   = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT   = 0xfee,
	NODE_SYNCH_BEGIN_EVT    = 0xcbe,
	NODE_SYNCH_END_EVT      = 0xcee
};
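
/* The state and event values above appear to be chosen as hex mnemonics
 * rather than arbitrary constants: in the state values the high nibble
 * describes this endpoint and the low nibble the peer, with d = down,
 * a = up/active, c = coming and 1 = leaving. A typical contact
 * establishment therefore reads as:
 *
 *	SELF_DOWN_PEER_DOWN (0xdd)
 *	    --SELF_ESTABL_CONTACT_EVT--> SELF_UP_PEER_COMING (0xac)
 *	    --PEER_ESTABL_CONTACT_EVT--> SELF_UP_PEER_UP (0xaa)
 *
 * as implemented by tipc_node_fsm_evt() further down in this file.
 */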
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr);
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
				bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
static void tipc_node_put(struct tipc_node *node);
static bool node_is_up(struct tipc_node *n);

struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};

static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
	int bearer_id = n->active_links[sel & 1];

	if (unlikely(bearer_id == INVALID_BEARER_ID))
		return NULL;

	return n->links[bearer_id].link;
}

int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
{
	struct tipc_node *n;
	int bearer_id;
	unsigned int mtu = MAX_MSG_SIZE;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return mtu;

	bearer_id = n->active_links[sel & 1];
	if (likely(bearer_id != INVALID_BEARER_ID))
		mtu = n->links[bearer_id].mtu;
	tipc_node_put(n);
	return mtu;
}

u16 tipc_node_get_capabilities(struct net *net, u32 addr)
{
	struct tipc_node *n;
	u16 caps;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return TIPC_NODE_CAPABILITIES;
	caps = n->capabilities;
	tipc_node_put(n);
	return caps;
}

static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *n = container_of(kref, struct tipc_node, kref);

	kfree(n->bc_entry.link);
	kfree_rcu(n, rcu);
}

static void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

static void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node;
	unsigned int thash = tipc_hashfn(addr);

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
		if (node->addr != addr)
			continue;
		if (!kref_get_unless_zero(&node->kref))
			node = NULL;
		break;
	}
	rcu_read_unlock();
	return node;
}

/* tipc_node_find_by_id - locate specified node object by its 128-bit id
 * Note: this function is called only when a discovery request failed
 * to find the node by its 32-bit id, and is not time critical
 */
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		read_lock_bh(&n->lock);
		if (!memcmp(id, n->peer_id, 16) &&
		    kref_get_unless_zero(&n->kref))
			found = true;
		read_unlock_bh(&n->lock);
		if (found)
			break;
	}
	rcu_read_unlock();
	return found ? n : NULL;
}

static void tipc_node_read_lock(struct tipc_node *n)
{
	read_lock_bh(&n->lock);
}

static void tipc_node_read_unlock(struct tipc_node *n)
{
	read_unlock_bh(&n->lock);
}

static void tipc_node_write_lock(struct tipc_node *n)
{
	write_lock_bh(&n->lock);
}

static void tipc_node_write_unlock_fast(struct tipc_node *n)
{
	write_unlock_bh(&n->lock);
}

static void tipc_node_write_unlock(struct tipc_node *n)
{
	struct net *net = n->net;
	u32 addr = 0;
	u32 flags = n->action_flags;
	u32 link_id = 0;
	u32 bearer_id;
	struct list_head *publ_list;

	if (likely(!flags)) {
		write_unlock_bh(&n->lock);
		return;
	}

	addr = n->addr;
	link_id = n->link_id;
	bearer_id = link_id & 0xffff;
	publ_list = &n->publ_list;

	n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
			     TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

	write_unlock_bh(&n->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, addr);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, addr);

	if (flags & TIPC_NOTIFY_LINK_UP) {
		tipc_mon_peer_up(net, addr, bearer_id);
		tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
				     TIPC_NODE_SCOPE, link_id, link_id);
	}
	if (flags & TIPC_NOTIFY_LINK_DOWN) {
		tipc_mon_peer_down(net, addr, bearer_id);
		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
				      addr, link_id);
	}
}
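
/* tipc_node_write_unlock() above implements a deferred-notification
 * pattern: callers set bits in n->action_flags while holding the write
 * lock, and the unlock path snapshots and clears those bits before
 * calling out to the name table and the monitor. A minimal caller sketch
 * (illustrative only, not an additional API):
 *
 *	tipc_node_write_lock(n);
 *	n->action_flags |= TIPC_NOTIFY_LINK_UP;
 *	n->link_id = tipc_link_id(nl);
 *	tipc_node_write_unlock(n);   <-- publish/notify happens here
 *
 * Issuing tipc_nametbl_publish() and friends only after n->lock has been
 * released avoids nesting those subsystems' locks under the node lock.
 */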
static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
					  u8 *peer_id, u16 capabilities)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n, *temp_node;
	int i;

	spin_lock_bh(&tn->node_list_lock);
	n = tipc_node_find(net, addr);
	if (n) {
		/* Same node may come back with new capabilities */
		n->capabilities = capabilities;
		goto exit;
	}
	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	n->addr = addr;
	memcpy(&n->peer_id, peer_id, 16);
	n->net = net;
	n->capabilities = capabilities;
	kref_init(&n->kref);
	rwlock_init(&n->lock);
	INIT_HLIST_NODE(&n->hash);
	INIT_LIST_HEAD(&n->list);
	INIT_LIST_HEAD(&n->publ_list);
	INIT_LIST_HEAD(&n->conn_sks);
	skb_queue_head_init(&n->bc_entry.namedq);
	skb_queue_head_init(&n->bc_entry.inputq1);
	__skb_queue_head_init(&n->bc_entry.arrvq);
	skb_queue_head_init(&n->bc_entry.inputq2);
	for (i = 0; i < MAX_BEARERS; i++)
		spin_lock_init(&n->links[i].lock);
	n->state = SELF_DOWN_PEER_LEAVING;
	n->signature = INVALID_NODE_SIG;
	n->active_links[0] = INVALID_BEARER_ID;
	n->active_links[1] = INVALID_BEARER_ID;
	if (!tipc_link_bc_create(net, tipc_own_addr(net),
				 addr, U16_MAX,
				 tipc_link_window(tipc_bc_sndlink(net)),
				 n->capabilities,
				 &n->bc_entry.inputq1,
				 &n->bc_entry.namedq,
				 tipc_bc_sndlink(net),
				 &n->bc_entry.link)) {
		pr_warn("Broadcast rcv link creation failed, no memory\n");
		kfree(n);
		n = NULL;
		goto exit;
	}
	tipc_node_get(n);
	timer_setup(&n->timer, tipc_node_timeout, 0);
	n->keepalive_intv = U32_MAX;
	hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n->list, &temp_node->list);
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n;
}

static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = tipc_link_tolerance(l);
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	/* Link with lowest tolerance determines timer interval */
	if (intv < n->keepalive_intv)
		n->keepalive_intv = intv;

	/* Ensure link's abort limit corresponds to current tolerance */
	tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}

static void tipc_node_delete(struct tipc_node *node)
{
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	tipc_node_put(node);

	del_timer_sync(&node->timer);
	tipc_node_put(node);
}

void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
		tipc_node_delete(node);
	spin_unlock_bh(&tn->node_list_lock);
}

void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_add_tail(subscr, &n->publ_list);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_del_init(subscr);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_write_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_write_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_write_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_write_unlock(node);
	tipc_node_put(node);
}
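
/* Keepalive arithmetic, worked example (numbers are illustrative):
 * tipc_node_calculate_timer() above clamps the probe interval to a
 * quarter of the link tolerance, capped at 500 ms, and keeps the
 * smallest value over all links. With a single link of tolerance
 * 1500 ms:
 *
 *	keepalive_intv = min(1500 / 4, 500) = 375 ms
 *	abort limit    = 1500 / 375         = 4 probes
 *
 * so tipc_node_timeout() below probes every 375 ms and the link is
 * declared down after four unanswered probes, i.e. roughly within the
 * configured tolerance.
 */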
/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(struct timer_list *t)
{
	struct tipc_node *n = from_timer(n, t, timer);
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int bearer_id;
	int rc = 0;

	__skb_queue_head_init(&xmitq);

	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		spin_lock_bh(&le->lock);
		if (le->link) {
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
		}
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(n);
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}

/**
 * __tipc_node_link_up - handle addition of link
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
				struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	if (!nl || tipc_link_is_up(nl))
		return;

	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
	if (!tipc_link_is_up(nl))
		return;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = tipc_link_id(nl);

	/* Leave room for tunnel header when returning 'mtu' to users: */
	n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

	pr_debug("Established link <%s> on network plane %c\n",
		 tipc_link_name(nl), tipc_link_plane(nl));

	/* Ensure that a STATE message goes first */
	tipc_link_build_state_msg(nl, xmitq);

	/* First link? => give it both slots */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
		n->action_flags |= TIPC_NOTIFY_NODE_UP;
		tipc_link_set_active(nl, true);
		tipc_bcast_add_peer(n->net, nl, xmitq);
		return;
	}

	/* Second link => redistribute slots */
	if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
		pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_link_set_active(nl, true);
		tipc_link_set_active(ol, false);
	} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
		tipc_link_set_active(nl, true);
		*slot1 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
	}

	/* Prepare synchronization with first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}
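
/* Active-link slots, by example: active_links[0] and active_links[1]
 * hold bearer ids, and unicast senders pick a slot with "selector & 1"
 * (see tipc_node_get_mtu() and tipc_node_xmit()). After
 * __tipc_node_link_up() has run:
 *
 *	one link up                -> both slots point to its bearer
 *	two links, different prio  -> both slots point to the higher one
 *	two links, equal prio      -> one slot each, traffic is shared
 *
 * which keeps link selection a constant-time array lookup on the hot
 * path.
 */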
/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	struct tipc_media_addr *maddr;

	tipc_node_write_lock(n);
	__tipc_node_link_up(n, bearer_id, xmitq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr);
	tipc_node_write_unlock(n);
}

/**
 * __tipc_node_link_down - handle loss of link
 */
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr)
{
	struct tipc_link_entry *le = &n->links[*bearer_id];
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0, prio;
	struct tipc_link *l, *_l, *tnl;

	l = n->links[*bearer_id].link;
	if (!l || tipc_link_is_reset(l))
		return;

	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = tipc_link_id(l);

	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 tipc_link_name(l), tipc_link_plane(l));

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		prio = tipc_link_prio(_l);
		if (prio < highest)
			continue;
		if (prio > highest) {
			highest = prio;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	if (!node_is_up(n)) {
		if (tipc_link_peer_is_down(l))
			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		tipc_link_reset(l);
		tipc_link_build_reset_msg(l, xmitq);
		*maddr = &n->links[*bearer_id].maddr;
		node_lost_contact(n, &le->inputq);
		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
		return;
	}
	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);

	/* There is still a working link => initiate failover */
	*bearer_id = n->active_links[0];
	tnl = n->links[*bearer_id].link;
	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
	tipc_link_reset(l);
	tipc_link_fsm_evt(l, LINK_RESET_EVT);
	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
	*maddr = &n->links[*bearer_id].maddr;
}

static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct tipc_link *l = le->link;
	struct tipc_media_addr *maddr;
	struct sk_buff_head xmitq;
	int old_bearer_id = bearer_id;

	if (!l)
		return;

	__skb_queue_head_init(&xmitq);

	tipc_node_write_lock(n);
	if (!tipc_link_is_establishing(l)) {
		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
		if (delete) {
			kfree(l);
			le->link = NULL;
			n->link_cnt--;
		}
	} else {
		/* Defuse pending tipc_node_link_up() */
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
	}
	tipc_node_write_unlock(n);
	if (delete)
		tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
	tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
	tipc_sk_rcv(n->net, &le->inputq);
}
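
/* Failover in brief: when a link fails while another working link
 * exists, __tipc_node_link_down() above selects the survivor as tunnel,
 * initializes n->sync_point optimistically high (tunnel rcv_nxt plus
 * half the sequence number space) and wraps the failed link's packets
 * in FAILOVER_MSG tunnel headers via tipc_link_tnl_prepare(). Arriving
 * FAILOVER_MSGs can only lower the synch point, and the node stays in
 * NODE_FAILINGOVER until tipc_node_check_state() sees the tunnel link
 * pass it and issues NODE_FAILOVER_END_EVT.
 */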
static bool node_is_up(struct tipc_node *n)
{
	return n->active_links[0] != INVALID_BEARER_ID;
}

bool tipc_node_is_up(struct net *net, u32 addr)
{
	struct tipc_node *n;
	bool retval = false;

	if (in_own_node(net, addr))
		return true;

	n = tipc_node_find(net, addr);
	if (!n)
		return false;
	retval = node_is_up(n);
	tipc_node_put(n);
	return retval;
}

static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	addr ^= tipc_net(net)->random;
	while ((n = tipc_node_find(net, addr))) {
		tipc_node_put(n);
		addr++;
	}
	return addr;
}

/* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
 */
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;

	/* Suggest new address if some other peer is using this one */
	n = tipc_node_find(net, addr);
	if (n) {
		if (!memcmp(n->peer_id, id, NODE_ID_LEN))
			addr = 0;
		tipc_node_put(n);
		if (!addr)
			return 0;
		return tipc_node_suggest_addr(net, addr);
	}

	/* Suggest previously used address if peer is known */
	n = tipc_node_find_by_id(net, id);
	if (n) {
		addr = n->addr;
		tipc_node_put(n);
	}
	/* Even this node may be in trial phase */
	if (tn->trial_addr == addr)
		return tipc_node_suggest_addr(net, addr);

	return addr;
}
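
/* tipc_node_check_dest() below derives three booleans from a discovery
 * message and acts on the eight permutations; in table form:
 *
 *	sign  addr  link    action
 *	 ok    ok    up     all is fine, do nothing
 *	 ok    ok   down    respond, link will come up in due time
 *	 ok   bad    up     ignore until link drops (*dupl_addr)
 *	 ok   bad   down    accept new address, respond
 *	bad    ok    up     peer rebooted, accept new signature
 *	bad    ok   down    peer rebooted, accept signature, respond
 *	bad   bad    up     ignore until link drops (*dupl_addr)
 *	bad   bad   down    new peer, accept signature and address, respond
 *
 * This is a summary of the branch comments inside the function.
 */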
void tipc_node_check_dest(struct net *net, u32 addr,
			  u8 *peer_id, struct tipc_bearer *b,
			  u16 capabilities, u32 signature,
			  struct tipc_media_addr *maddr,
			  bool *respond, bool *dupl_addr)
{
	struct tipc_node *n;
	struct tipc_link *l;
	struct tipc_link_entry *le;
	bool addr_match = false;
	bool sign_match = false;
	bool link_up = false;
	bool accept_addr = false;
	bool reset = true;
	char *if_name;
	unsigned long intv;

	*dupl_addr = false;
	*respond = false;

	n = tipc_node_create(net, addr, peer_id, capabilities);
	if (!n)
		return;

	tipc_node_write_lock(n);

	le = &n->links[b->identity];

	/* Prepare to validate requesting node's signature and media address */
	l = le->link;
	link_up = l && tipc_link_is_up(l);
	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
	sign_match = (signature == n->signature);

	/* These three flags give us eight permutations: */

	if (sign_match && addr_match && link_up) {
		/* All is fine. Do nothing. */
		reset = false;
	} else if (sign_match && addr_match && !link_up) {
		/* Respond. The link will come up in due time */
		*respond = true;
	} else if (sign_match && !addr_match && link_up) {
		/* Peer has changed i/f address without rebooting.
		 * If so, the link will reset soon, and the next
		 * discovery will be accepted. So we can ignore it.
		 * It may also be a cloned or malicious peer having
		 * chosen the same node address and signature as an
		 * existing one.
		 * Ignore requests until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (sign_match && !addr_match && !link_up) {
		/* Peer link has changed i/f address without rebooting.
		 * It may also be a cloned or malicious peer; we can't
		 * distinguish between the two.
		 * The signature is correct, so we must accept.
		 */
		accept_addr = true;
		*respond = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Peer node rebooted. Two possibilities:
		 *  - Delayed re-discovery; this link endpoint has already
		 *    reset and re-established contact with the peer, before
		 *    receiving a discovery message from that node.
		 *    (The peer happened to receive one from this node first).
		 *  - The peer came back so fast that our side has not
		 *    discovered it yet. Probing from this side will soon
		 *    reset the link, since there can be no working link
		 *    endpoint at the peer end, and the link will re-establish.
		 *  Accept the signature, since it comes from a known peer.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/* The peer node has rebooted.
		 * Accept signature, since it is a known peer.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Peer rebooted with new address, or a new/duplicate peer.
		 * Ignore until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with new address, or it is a new peer.
		 * Accept signature and address.
		 */
		n->signature = signature;
		accept_addr = true;
		*respond = true;
	}

	if (!accept_addr)
		goto exit;

	/* Now create new link if not already existing */
	if (!l) {
		if (n->link_cnt == 2)
			goto exit;

		if_name = strchr(b->name, ':') + 1;
		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
				      b->net_plane, b->mtu, b->priority,
				      b->window, mod(tipc_net(net)->random),
				      tipc_own_addr(net), addr, peer_id,
				      n->capabilities,
				      tipc_bc_sndlink(n->net), n->bc_entry.link,
				      &le->inputq,
				      &n->bc_entry.namedq, &l)) {
			*respond = false;
			goto exit;
		}
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		if (n->state == NODE_FAILINGOVER)
			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
		le->link = l;
		n->link_cnt++;
		tipc_node_calculate_timer(n, l);
		if (n->link_cnt == 1) {
			intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
			if (!mod_timer(&n->timer, intv))
				tipc_node_get(n);
		}
	}
	memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
	tipc_node_write_unlock(n);
	if (reset && l && !tipc_link_is_reset(l))
		tipc_node_link_down(n, b->identity, false);
	tipc_node_put(n);
}

void tipc_node_delete_links(struct net *net, int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_link_down(n, bearer_id, true);
	}
	rcu_read_unlock();
}

static void tipc_node_reset_links(struct tipc_node *n)
{
	int i;

	pr_warn("Resetting all links to %x\n", n->addr);

	for (i = 0; i < MAX_BEARERS; i++) {
		tipc_node_link_down(n, i, false);
	}
}
/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	n->state = state;
	return;

illegal_evt:
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
}

static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %x\n", n->addr);

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;

	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}

/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of the output buffer
 *
 * Returns 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_read_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		strncpy(linkname, tipc_link_name(link), len);
		err = 0;
	}
	tipc_node_read_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
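
/* Sender-side usage sketch (illustrative): tipc_node_xmit() consumes a
 * whole buffer chain, so single-buffer callers wrap the skb in a queue
 * exactly as tipc_node_xmit_skb() below does:
 *
 *	struct sk_buff_head head;
 *
 *	skb_queue_head_init(&head);
 *	__skb_queue_tail(&head, skb);
 *	tipc_node_xmit(net, &head, dnode, selector);
 *
 * On -EHOSTUNREACH the chain has already been purged, so the caller must
 * not touch the buffers afterwards.
 */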
/**
 * tipc_node_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain.
 * Returns 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUFS
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link_entry *le = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	int bearer_id;
	int rc;

	if (in_own_node(net, dnode)) {
		tipc_sk_rcv(net, list);
		return 0;
	}

	n = tipc_node_find(net, dnode);
	if (unlikely(!n)) {
		skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	tipc_node_read_lock(n);
	bearer_id = n->active_links[selector & 1];
	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
		tipc_node_read_unlock(n);
		tipc_node_put(n);
		skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	__skb_queue_head_init(&xmitq);
	le = &n->links[bearer_id];
	spin_lock_bh(&le->lock);
	rc = tipc_link_xmit(le->link, list, &xmitq);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(n);

	if (unlikely(rc == -ENOBUFS))
		tipc_node_link_down(n, bearer_id, false);
	else
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	tipc_node_put(n);

	return rc;
}

/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	tipc_node_xmit(net, &head, dnode, selector);
	return 0;
}

/* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
 */
int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	u32 selector, dnode;

	while ((skb = __skb_dequeue(xmitq))) {
		selector = msg_origport(buf_msg(skb));
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, selector);
	}
	return 0;
}

void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
{
	struct sk_buff *txskb;
	struct tipc_node *n;
	u32 dst;

	rcu_read_lock();
	list_for_each_entry_rcu(n, tipc_nodes(net), list) {
		dst = n->addr;
		if (in_own_node(net, dst))
			continue;
		if (!node_is_up(n))
			continue;
		txskb = pskb_copy(skb, GFP_ATOMIC);
		if (!txskb)
			break;
		msg_set_destnode(buf_msg(txskb), dst);
		tipc_node_xmit_skb(net, txskb, dst, 0);
	}
	rcu_read_unlock();

	kfree_skb(skb);
}

static void tipc_node_mcast_rcv(struct tipc_node *n)
{
	struct tipc_bclink_entry *be = &n->bc_entry;

	/* 'arrvq' is under inputq2's lock protection */
	spin_lock_bh(&be->inputq2.lock);
	spin_lock_bh(&be->inputq1.lock);
	skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
	spin_unlock_bh(&be->inputq1.lock);
	spin_unlock_bh(&be->inputq2.lock);
	tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
}
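
/* Multicast reception chains three queues per peer node: packets land
 * in bc_entry.inputq1, tipc_node_mcast_rcv() above splices them into
 * arrvq while holding inputq2's lock (arrvq has no lock of its own,
 * as noted in the function), and tipc_sk_mcast_rcv() then delivers
 * from arrvq into inputq2 and on to the receiving sockets. Taking
 * inputq2's lock before inputq1's keeps the splice atomic with respect
 * to concurrent deliverers.
 */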
static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_link *ucl;
	int rc;

	rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr);

	if (rc & TIPC_LINK_DOWN_EVT) {
		tipc_node_reset_links(n);
		return;
	}

	if (!(rc & TIPC_LINK_SND_STATE))
		return;

	/* If probe message, a STATE response will be sent anyway */
	if (msg_probe(hdr))
		return;

	/* Produce a STATE message carrying broadcast NACK */
	tipc_node_read_lock(n);
	ucl = n->links[bearer_id].link;
	if (ucl)
		tipc_link_build_state_msg(ucl, xmitq);
	tipc_node_read_unlock(n);
}

/**
 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer_id: id of bearer message arrived on
 *
 * Invoked with no locks held.
 */
static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
{
	int rc;
	struct sk_buff_head xmitq;
	struct tipc_bclink_entry *be;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	u32 dnode = msg_destnode(hdr);
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	/* If NACK for other node, let rcv link for that node peek into it */
	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
		n = tipc_node_find(net, dnode);
	else
		n = tipc_node_find(net, msg_prevnode(hdr));
	if (!n) {
		kfree_skb(skb);
		return;
	}
	be = &n->bc_entry;
	le = &n->links[bearer_id];

	rc = tipc_bcast_rcv(net, be->link, skb);

	/* Broadcast ACKs are sent on a unicast link */
	if (rc & TIPC_LINK_SND_STATE) {
		tipc_node_read_lock(n);
		tipc_link_build_state_msg(le->link, &xmitq);
		tipc_node_read_unlock(n);
	}

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	if (!skb_queue_empty(&be->inputq1))
		tipc_node_mcast_rcv(n);

	/* If reassembly or retransmission failure => reset all links to peer */
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_node_reset_links(n);

	tipc_node_put(n);
}
/**
 * tipc_node_check_state - check and if necessary update node state
 * @n: target tipc_node
 * @skb: TIPC packet
 * @bearer_id: identity of bearer delivering the packet
 * @xmitq: queue for messages to be sent
 * Returns true if state is ok, otherwise consumes buffer and returns false
 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int mtyp = msg_type(hdr);
	u16 oseqno = msg_seqno(hdr);
	u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
	u16 exp_pkts = msg_msgcnt(hdr);
	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
	int state = n->state;
	struct tipc_link *l, *tnl, *pl = NULL;
	struct tipc_media_addr *maddr;
	int pb_id;

	l = n->links[bearer_id].link;
	if (!l)
		return false;
	rcv_nxt = tipc_link_rcv_nxt(l);

	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
		return true;

	/* Find parallel link, if any */
	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
		if ((pb_id != bearer_id) && n->links[pb_id].link) {
			pl = n->links[pb_id].link;
			break;
		}
	}

	/* Check and update node accessibility if applicable */
	if (state == SELF_UP_PEER_COMING) {
		if (!tipc_link_is_up(l))
			return true;
		if (!msg_peer_link_is_up(hdr))
			return true;
		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
	}

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_node_is_up(hdr))
			return false;
		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		return true;
	}

	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	/* Ignore duplicate packets */
	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
		return true;

	/* Initiate or update failover mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
		syncpt = oseqno + exp_pkts - 1;
		if (pl && tipc_link_is_up(pl)) {
			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
							tipc_link_inputq(l));
		}
		/* If pkts arrive out of order, use lowest calculated syncpt */
		if (less(syncpt, n->sync_point))
			n->sync_point = syncpt;
	}

	/* Open parallel link when tunnel link reaches synch point */
	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
		if (!more(rcv_nxt, n->sync_point))
			return true;
		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
		if (pl)
			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
		return true;
	}

	/* No synching needed if only one link */
	if (!pl || !tipc_link_is_up(pl))
		return true;

	/* Initiate synch mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
		syncpt = iseqno + exp_pkts - 1;
		if (!tipc_link_is_up(l))
			__tipc_node_link_up(n, bearer_id, xmitq);
		if (n->state == SELF_UP_PEER_UP) {
			n->sync_point = syncpt;
			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
		}
	}

	/* Open tunnel link when parallel link reaches synch point */
	if (n->state == NODE_SYNCHING) {
		if (tipc_link_is_synching(l)) {
			tnl = l;
		} else {
			tnl = pl;
			pl = l;
		}
		inputq_len = skb_queue_len(tipc_link_inputq(pl));
		dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
		if (more(dlv_nxt, n->sync_point)) {
			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
			return true;
		}
		if (l == pl)
			return true;
		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
			return true;
		if (usr == LINK_PROTOCOL)
			return true;
		return false;
	}
	return true;
}
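
/* Synch-point arithmetic in tipc_node_check_state() above, by example:
 * a SYNCH_MSG with inner sequence number iseqno = 1000 and
 * exp_pkts = 5 gives syncpt = 1000 + 5 - 1 = 1004, and the parallel
 * link is opened once its delivered sequence number (rcv_nxt minus the
 * still-queued input packets) passes 1004. FAILOVER_MSG uses the outer
 * number (oseqno) the same way, and out-of-order failover packets only
 * ever lower n->sync_point, never raise it.
 */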
/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
	struct sk_buff_head xmitq;
	struct tipc_node *n;
	struct tipc_msg *hdr;
	int bearer_id = b->identity;
	struct tipc_link_entry *le;
	u32 self = tipc_own_addr(net);
	int usr, rc = 0;
	u16 bc_ack;

	__skb_queue_head_init(&xmitq);

	/* Ensure message is well-formed before touching the header */
	if (unlikely(!tipc_msg_validate(&skb)))
		goto discard;
	hdr = buf_msg(skb);
	usr = msg_user(hdr);
	bc_ack = msg_bcast_ack(hdr);

	/* Handle arrival of discovery or broadcast packet */
	if (unlikely(msg_non_seq(hdr))) {
		if (unlikely(usr == LINK_CONFIG))
			return tipc_disc_rcv(net, skb, b);
		else
			return tipc_node_bc_rcv(net, skb, bearer_id);
	}

	/* Discard unicast link messages destined for another node */
	if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
		goto discard;

	/* Locate neighboring node that sent packet */
	n = tipc_node_find(net, msg_prevnode(hdr));
	if (unlikely(!n))
		goto discard;
	le = &n->links[bearer_id];

	/* Ensure broadcast reception is in synch with peer's send state */
	if (unlikely(usr == LINK_PROTOCOL))
		tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
	else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
		tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);

	/* Receive packet directly if conditions permit */
	tipc_node_read_lock(n);
	if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
		spin_lock_bh(&le->lock);
		if (le->link) {
			rc = tipc_link_rcv(le->link, skb, &xmitq);
			skb = NULL;
		}
		spin_unlock_bh(&le->lock);
	}
	tipc_node_read_unlock(n);

	/* Check/update node state before receiving */
	if (unlikely(skb)) {
		if (unlikely(skb_linearize(skb)))
			goto discard;
		tipc_node_write_lock(n);
		if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
			if (le->link) {
				rc = tipc_link_rcv(le->link, skb, &xmitq);
				skb = NULL;
			}
		}
		tipc_node_write_unlock(n);
	}

	if (unlikely(rc & TIPC_LINK_UP_EVT))
		tipc_node_link_up(n, bearer_id, &xmitq);

	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
		tipc_node_link_down(n, bearer_id, false);

	if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
		tipc_named_rcv(net, &n->bc_entry.namedq);

	if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
		tipc_node_mcast_rcv(n);

	if (!skb_queue_empty(&le->inputq))
		tipc_sk_rcv(net, &le->inputq);

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	tipc_node_put(n);
discard:
	kfree_skb(skb);
}

void tipc_node_apply_tolerance(struct net *net, struct tipc_bearer *b)
{
	struct tipc_net *tn = tipc_net(net);
	int bearer_id = b->identity;
	struct sk_buff_head xmitq;
	struct tipc_link_entry *e;
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	rcu_read_lock();

	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_write_lock(n);
		e = &n->links[bearer_id];
		if (e->link)
			tipc_link_set_tolerance(e->link, b->tolerance, &xmitq);
		tipc_node_write_unlock(n);
		tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr);
	}

	rcu_read_unlock();
}
int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
	struct tipc_node *peer;
	u32 addr;
	int err;
	int i;

	/* We identify the peer by its net */
	if (!info->attrs[TIPC_NLA_NET])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX,
			       info->attrs[TIPC_NLA_NET], tipc_nl_net_policy,
			       info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_NET_ADDR])
		return -EINVAL;

	addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);

	if (in_own_node(net, addr))
		return -ENOTSUPP;

	spin_lock_bh(&tn->node_list_lock);
	peer = tipc_node_find(net, addr);
	if (!peer) {
		spin_unlock_bh(&tn->node_list_lock);
		return -ENXIO;
	}

	tipc_node_write_lock(peer);
	if (peer->state != SELF_DOWN_PEER_DOWN &&
	    peer->state != SELF_DOWN_PEER_LEAVING) {
		tipc_node_write_unlock(peer);
		err = -EBUSY;
		goto err_out;
	}

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link_entry *le = &peer->links[i];

		if (le->link) {
			kfree(le->link);
			le->link = NULL;
			peer->link_cnt--;
		}
	}
	tipc_node_write_unlock(peer);
	tipc_node_delete(peer);

	err = 0;
err_out:
	tipc_node_put(peer);
	spin_unlock_bh(&tn->node_list_lock);

	return err;
}

int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent(),
			 * so setting prev_seq here will cause the consistency
			 * check to fail in the netlink callback handler,
			 * resulting in the NLMSG_DONE message having the
			 * NLM_F_DUMP_INTR flag set if the node state changed
			 * while we released the lock.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		if (last_addr) {
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_read_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			last_addr = node->addr;
			tipc_node_read_unlock(node);
			goto out;
		}

		tipc_node_read_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}
/* tipc_node_find_by_name - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_node_find_by_name(struct net *net,
						const char *link_name,
						unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l;
	struct tipc_node *n;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_read_lock(n);
		for (i = 0; i < MAX_BEARERS; i++) {
			l = n->links[i].link;
			if (l && !strcmp(tipc_link_name(l), link_name)) {
				*bearer_id = i;
				found_node = n;
				break;
			}
		}
		tipc_node_read_unlock(n);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}

int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct sk_buff_head xmitq;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	__skb_queue_head_init(&xmitq);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_node_find_by_name(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_read_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			tipc_link_set_tolerance(link, tol, &xmitq);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			tipc_link_set_prio(link, prio, &xmitq);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_read_unlock(node);
	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr);
	return res;
}
int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto err_free;
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_node_find_by_name(net, name, &bearer_id);
		if (!node) {
			err = -EINVAL;
			goto err_free;
		}

		tipc_node_read_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_read_unlock(node);
			err = -EINVAL;
			goto err_free;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_read_unlock(node);
		if (err)
			goto err_free;
	}

	return genlmsg_reply(msg.skb, info);

err_free:
	nlmsg_free(msg.skb);
	return err;
}

int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_link_entry *le;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats(net);
		if (err)
			return err;
		return 0;
	}

	node = tipc_node_find_by_name(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	le = &node->links[bearer_id];
	tipc_node_read_lock(node);
	spin_lock_bh(&le->lock);
	link = node->links[bearer_id].link;
	if (!link) {
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(node);
		return -EINVAL;
	}
	tipc_link_reset_stats(link);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(node);
	return 0;
}

/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}
                         */
                        cb->prev_seq = 1;
                        goto out;
                }
                tipc_node_put(node);

                list_for_each_entry_continue_rcu(node, &tn->node_list,
                                                 list) {
                        tipc_node_read_lock(node);
                        err = __tipc_nl_add_node_links(net, &msg, node,
                                                       &prev_link);
                        tipc_node_read_unlock(node);
                        if (err)
                                goto out;

                        prev_node = node->addr;
                }
        } else {
                err = tipc_nl_add_bc_link(net, &msg);
                if (err)
                        goto out;

                list_for_each_entry_rcu(node, &tn->node_list, list) {
                        tipc_node_read_lock(node);
                        err = __tipc_nl_add_node_links(net, &msg, node,
                                                       &prev_link);
                        tipc_node_read_unlock(node);
                        if (err)
                                goto out;

                        prev_node = node->addr;
                }
        }
        done = 1;
out:
        rcu_read_unlock();

        cb->args[0] = prev_node;
        cb->args[1] = prev_link;
        cb->args[2] = done;

        return skb->len;
}

int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
        struct net *net = sock_net(skb->sk);
        int err;

        if (!info->attrs[TIPC_NLA_MON])
                return -EINVAL;

        err = nla_parse_nested(attrs, TIPC_NLA_MON_MAX,
                               info->attrs[TIPC_NLA_MON],
                               tipc_nl_monitor_policy, info->extack);
        if (err)
                return err;

        if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
                u32 val;

                val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
                err = tipc_nl_monitor_set_threshold(net, val);
                if (err)
                        return err;
        }

        return 0;
}

static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
{
        struct nlattr *attrs;
        void *hdr;
        u32 val;

        hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
                          0, TIPC_NL_MON_GET);
        if (!hdr)
                return -EMSGSIZE;

        attrs = nla_nest_start(msg->skb, TIPC_NLA_MON);
        if (!attrs)
                goto msg_full;

        val = tipc_nl_monitor_get_threshold(net);

        if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
                goto attr_msg_full;

        nla_nest_end(msg->skb, attrs);
        genlmsg_end(msg->skb, hdr);

        return 0;

attr_msg_full:
        nla_nest_cancel(msg->skb, attrs);
msg_full:
        genlmsg_cancel(msg->skb, hdr);

        return -EMSGSIZE;
}

int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = sock_net(skb->sk);
        struct tipc_nl_msg msg;
        int err;

        msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!msg.skb)
                return -ENOMEM;
        msg.portid = info->snd_portid;
        msg.seq = info->snd_seq;

        err = __tipc_nl_add_monitor_prop(net, &msg);
        if (err) {
                nlmsg_free(msg.skb);
                return err;
        }

        return genlmsg_reply(msg.skb, info);
}

int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        u32 prev_bearer = cb->args[0];
        struct tipc_nl_msg msg;
        int err;
        int i;

        if (prev_bearer == MAX_BEARERS)
                return 0;

        msg.skb = skb;
        msg.portid = NETLINK_CB(cb->skb).portid;
        msg.seq = cb->nlh->nlmsg_seq;

        rtnl_lock();
        for (i = prev_bearer; i < MAX_BEARERS; i++) {
                err = __tipc_nl_add_monitor(net, &msg, i);
                if (err)
                        break;
        }
        rtnl_unlock();
        /* Resume cookie: on -EMSGSIZE, i is the bearer that did not fit
         * and the next pass retries it; after full success i equals
         * MAX_BEARERS, so the check at the top of this function ends
         * the dump on the next invocation.
         */
        cb->args[0] = i;
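        /* A positive return value tells the netlink core to invoke this
         * dump callback again with the cb->args[] cookie preserved.
         */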
        return skb->len;
}

int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
                                   struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        u32 prev_node = cb->args[1];
        u32 bearer_id = cb->args[2];
        int done = cb->args[0];
        struct tipc_nl_msg msg;
        int err;

        if (!prev_node) {
                struct nlattr **attrs;
                struct nlattr *mon[TIPC_NLA_MON_MAX + 1];

                err = tipc_nlmsg_parse(cb->nlh, &attrs);
                if (err)
                        return err;

                if (!attrs[TIPC_NLA_MON])
                        return -EINVAL;

                err = nla_parse_nested(mon, TIPC_NLA_MON_MAX,
                                       attrs[TIPC_NLA_MON],
                                       tipc_nl_monitor_policy, NULL);
                if (err)
                        return err;

                if (!mon[TIPC_NLA_MON_REF])
                        return -EINVAL;

                bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);

                if (bearer_id >= MAX_BEARERS)
                        return -EINVAL;
        }

        if (done)
                return 0;

        msg.skb = skb;
        msg.portid = NETLINK_CB(cb->skb).portid;
        msg.seq = cb->nlh->nlmsg_seq;

        rtnl_lock();
        err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
        if (!err)
                done = 1;

        rtnl_unlock();
        cb->args[0] = done;
        cb->args[1] = prev_node;
        cb->args[2] = bearer_id;

        return skb->len;
}
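/* Userspace sketch (informational only, not part of the kernel build):
 * the handlers above sit behind TIPC's generic netlink family and are
 * typically driven by the tipc(8) tool from iproute2. Assuming the
 * tipc-link(8) command syntax, the mapping looks roughly like:
 *
 *   tipc link set tolerance 1500 link <name>   -> tipc_nl_node_set_link()
 *   tipc link statistics reset link <name>     -> tipc_nl_node_reset_link_stats()
 *   tipc link list                             -> tipc_nl_node_dump_link()
 *
 * The exact CLI spelling may vary between tool versions.
 */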