/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"

#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX] = { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX] = { .type = NLA_U32 }
};

/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC] = { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL] = { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN] = { .type = NLA_U32 }
};

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
/*
 * Out-of-range value for link session numbers
 */
#define WILDCARD_SESSION 0x10000

/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED = 0xe,
	LINK_ESTABLISHING = 0xe << 4,
	LINK_RESET = 0x1 << 8,
	LINK_RESETTING = 0x2 << 12,
	LINK_PEER_RESET = 0xd << 16,
	LINK_FAILINGOVER = 0xf << 20,
	LINK_SYNCHING = 0xc << 24
};

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}
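
/* Example: the FSM states above are deliberately encoded with disjoint
 * bit patterns, so a set of states can be tested with a single mask
 * instead of a switch. A minimal sketch (illustrative only, not part of
 * the implementation):
 *
 *	static bool link_in_any(struct tipc_link *l, u32 state_mask)
 *	{
 *		return l->state & state_mask;
 *	}
 *
 * With this, link_in_any(l, LINK_ESTABLISHED | LINK_SYNCHING) is the
 * same check that link_is_up() performs.
 */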

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

int tipc_link_is_active(struct tipc_link *l)
{
	struct tipc_node *n = l->owner;

	return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
}

static u32 link_own_addr(struct tipc_link *l)
{
	return msg_prevnode(l->pmsg);
}

/**
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @b: pointer to associated bearer
 * @session: session number to be used by the link endpoint
 * @ownnode: identity of own node
 * @peer: identity of peer node
 * @maddr: media address to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
		      u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
		      struct sk_buff_head *inputq, struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	struct tipc_link *l;
	struct tipc_msg *hdr;
	char *if_name;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;

	/* Note: peer i/f name is completed by reset/activate message */
	if_name = strchr(b->name, ':') + 1;
	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));

	l->addr = peer;
	l->media_addr = maddr;
	l->owner = n;
	l->peer_session = WILDCARD_SESSION;
	l->bearer_id = b->identity;
	l->tolerance = b->tolerance;
	l->net_plane = b->net_plane;
	l->advertised_mtu = b->mtu;
	l->mtu = b->mtu;
	l->priority = b->priority;
	tipc_link_set_queue_limits(l, b->window);
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	l->pmsg = (struct tipc_msg *)&l->proto_msg;
	hdr = l->pmsg;
	tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
	msg_set_size(hdr, sizeof(l->proto_msg));
	msg_set_session(hdr, session);
	msg_set_bearer_id(hdr, l->bearer_id);
	strcpy((char *)msg_data(hdr), if_name);
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}
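
/* Example: a caller-side sketch of link creation. The node, bearer,
 * media address and delivery queues (n, b, maddr, inputq, namedq) are
 * assumed to be set up by the caller; the session number here is just
 * an arbitrary starting value (illustrative only, not the actual
 * node-layer code):
 *
 *	struct tipc_link *l;
 *
 *	if (!tipc_link_create(n, b, prandom_u32() & 0xffff, ownnode,
 *			      peer, maddr, inputq, namedq, &l))
 *		return -ENOMEM;
 *	// l starts in LINK_RESETTING; the RESET/ACTIVATE handshake
 *	// must complete before the FSM reports TIPC_LINK_UP_EVT
 */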

/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct sk_buff_head list;
	u16 last_sent;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, link_own_addr(l), 0, 0, 0);
	if (!skb)
		return;
	last_sent = tipc_bclink_get_last_sent(l->owner->net);
	msg_set_last_bcast(buf_msg(skb), last_sent);
	__skb_queue_head_init(&list);
	__skb_queue_tail(&list, skb);
	tipc_link_xmit(l, &list, xmitq);
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			rc |= TIPC_LINK_UP_EVT;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_FAILURE_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	return rc;
}

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int rc = 0;
	int mtyp = STATE_MSG;
	bool xmit = false;
	bool prb = false;

	link_profile_stats(l);

	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		if (!l->silent_intv_cnt) {
			if (tipc_bclink_acks_missing(l->owner))
				xmit = true;
		} else if (l->silent_intv_cnt <= l->abort_limit) {
			xmit = true;
			prb = true;
		} else {
			rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		l->silent_intv_cnt++;
		break;
	case LINK_RESET:
		xmit = true;
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		xmit = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (xmit)
		tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);

	return rc;
}
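
/* Example: how a caller is expected to drive the FSM. The return value
 * accumulates side effects (TIPC_LINK_UP_EVT/TIPC_LINK_DOWN_EVT) that
 * the node layer must act upon; the state transition itself happens
 * inside tipc_link_fsm_evt(). A minimal sketch, assuming the caller
 * holds the node lock (illustrative only):
 *
 *	int rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 *
 *	if (rc & TIPC_LINK_DOWN_EVT)
 *		// tell the node layer to deactivate the link and
 *		// possibly start failover onto a parallel link
 */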

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message that was attempted sent
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = link_own_addr(link);
	struct sk_buff *skb;

	/* This really cannot happen... */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full\n", link_rst_msg, link->name);
		return -ENOBUFS;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
	}
}

/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}

void tipc_link_purge_backlog(struct tipc_link *l)
{
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
}

/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	__skb_queue_purge(&l_ptr->deferdq);
	__skb_queue_purge(&l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	tipc_link_reset_fragments(l_ptr);
}

void tipc_link_reset(struct tipc_link *l)
{
	tipc_link_fsm_evt(l, LINK_RESET_EVT);

	/* Link is down, accept any session */
	l->peer_session = WILDCARD_SESSION;

	/* If peer is up, it only accepts an incremented session number */
	msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);

	/* Prepare for renewed mtu size negotiation */
	l->mtu = l->advertised_mtu;

	/* Clean up all queues: */
	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	skb_queue_splice_init(&l->wakeupq, l->inputq);

	tipc_link_purge_backlog(l);
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->silent_intv_cnt = 0;
	l->stats.recv_info = 0;
	l->stale_count = 0;
	link_reset_statistics(l);
}
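
/* Example: the congestion/wakeup round trip seen from a sender. A
 * minimal sketch, assuming a socket-layer caller (illustrative only):
 *
 *	rc = tipc_link_xmit(l, &list, &xmitq);
 *	if (rc == -ELINKCONG)
 *		// a SOCK_WAKEUP pseudo message is now parked on
 *		// l->wakeupq; when the backlog drains below the
 *		// per-importance limit, link_prepare_wakeup() moves
 *		// it to l->inputq and the socket is woken up
 */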

/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @net: the applicable net namespace
 * @link: link to use
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning an error code.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	unsigned int maxwin = link->window;
	unsigned int i, imp = msg_importance(msg);
	uint mtu = link->mtu;
	u16 ack = mod(link->rcv_nxt - 1);
	u16 seqno = link->snd_nxt;
	u16 bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = link->media_addr;
	struct sk_buff_head *transmq = &link->transmq;
	struct sk_buff_head *backlogq = &link->backlogq;
	struct sk_buff *skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
			return link_schedule_user(link, list);
	}
	if (unlikely(msg_size(msg) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		msg = buf_msg(skb);
		msg_set_seqno(msg, seqno);
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			tipc_bearer_send(net, link->bearer_id, skb, addr);
			link->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
			kfree_skb(__skb_dequeue(list));
			link->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			link->backlog[msg_importance(buf_msg(bskb))].len++;
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
			continue;
		}
		link->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	link->snd_nxt = seqno;
	return 0;
}
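
/* Example: the per-packet enqueue decision made in the loop above, in
 * order of preference. A worked case, assuming window = 50 (hedged,
 * illustrative numbers):
 *
 *	1) transmq holds < 50 packets -> send now, append to transmq
 *	2) transmq full, packet fits into the last bundle on backlogq
 *	                              -> merge it (sent_bundled++)
 *	3) bundling impossible, but a new bundle can be created
 *	                              -> wrap it (sent_bundles++)
 *	4) otherwise                  -> splice the rest onto backlogq,
 *	                                 charged to its importance level
 */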

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_last_in = l->owner->bclink.last_in;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
			return link_schedule_user(l, list);
	}
	if (unlikely(msg_size(hdr) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb)
				return -ENOBUFS;
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			l->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return 0;
}

/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}
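
/* Example: unlike __tipc_link_xmit(), tipc_link_xmit() does not touch
 * the bearer itself; clones of the sent packets are returned in @xmitq
 * and the caller forwards them outside the link. A minimal sketch,
 * mirroring the send/free pattern of tipc_link_proto_xmit() and
 * assuming caller-provided net/bearer_id/maddr (illustrative only):
 *
 *	struct sk_buff_head xmitq;
 *	struct sk_buff *skb;
 *
 *	__skb_queue_head_init(&xmitq);
 *	rc = tipc_link_xmit(l, &list, &xmitq);
 *	while ((skb = __skb_dequeue(&xmitq))) {
 *		tipc_bearer_send(net, bearer_id, skb, maddr);
 *		kfree_skb(skb);
 *	}
 */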

/*
 * tipc_link_push_packets - push unsent packets to bearer
 *
 * Push out the unsent messages of a link where congestion
 * has abated.
 *
 * Called with node locked
 */
void tipc_link_push_packets(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	u16 seqno = link->snd_nxt;
	u16 ack = mod(link->rcv_nxt - 1);

	while (skb_queue_len(&link->transmq) < link->window) {
		skb = __skb_dequeue(&link->backlogq);
		if (!skb)
			break;
		msg = buf_msg(skb);
		link->backlog[msg_importance(msg)].len--;
		msg_set_ack(msg, ack);
		msg_set_seqno(msg, seqno);
		seqno = mod(seqno + 1);
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		link->rcv_unacked = 0;
		__skb_queue_tail(&link->transmq, skb);
		tipc_bearer_send(link->owner->net, link->bearer_id,
				 skb, link->media_addr);
	}
	link->snd_nxt = seqno;
}

void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		msg_set_ack(hdr, ack);
		msg_set_seqno(hdr, seqno);
		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
		l->rcv_unacked = 0;
		seqno++;
	}
	l->snd_nxt = seqno;
}

static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct net *net = l_ptr->owner->net;

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link ");
		pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
			msg_user(msg), msg_type(msg), msg_size(msg),
			msg_errcode(msg));
		pr_info("sqno %u, prev: %x, src: %x\n",
			msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u, ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to(net);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d, Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		n_ptr->action_flags |= TIPC_BCAST_RESET;
		l_ptr->stale_count = 0;
	}
}

void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!skb)
		return;

	msg = buf_msg(skb);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransm == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, skb);
			return;
		}
	} else {
		l_ptr->last_retransm = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	skb_queue_walk_from(&l_ptr->transmq, skb) {
		if (!retransmits)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
				 l_ptr->media_addr);
		retransmits--;
		l_ptr->stats.retransmitted++;
	}
}

static int tipc_link_retransm(struct tipc_link *l, int retransm,
			      struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
		l->stale_count = 1;
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}
	skb_queue_walk(&l->transmq, skb) {
		if (!retransm)
			return 0;
		hdr = buf_msg(skb);
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, l->rcv_nxt - 1);
		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		retransm--;
		l->stats.retransmitted++;
	}
	return 0;
}

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	struct tipc_node *node = link->owner;

	switch (msg_user(buf_msg(skb))) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		__skb_queue_tail(inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		node->bclink.recv_permitted = true;
		skb_queue_tail(link->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	}
}
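
/* Example: how a retransmit request reaches tipc_link_retransm(). When
 * a peer STATE message carries a non-zero sequence gap, the receive
 * side asks for that many packets to be copied from the head of the
 * transmit queue. A minimal sketch of the caller's logic, mirroring
 * tipc_link_proto_rcv() below (illustrative only):
 *
 *	u16 nacked_gap = msg_seq_gap(hdr);
 *
 *	if (nacked_gap) {
 *		rc = tipc_link_retransm(l, nacked_gap, xmitq);
 *		l->stats.recv_nacks++;
 *	}
 */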

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq)
{
	struct tipc_node *node = l->owner;
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff **reasm_skb = &l->reasm_buf;
	struct sk_buff *iskb;
	int usr = msg_user(hdr);
	int rc = 0;
	int pos = 0;
	int ipos = 0;

	if (unlikely(usr == TUNNEL_PROTOCOL)) {
		if (msg_type(hdr) == SYNCH_MSG) {
			__skb_queue_purge(&l->deferdq);
			goto drop;
		}
		if (!tipc_msg_extract(skb, &iskb, &ipos))
			return rc;
		kfree_skb(skb);
		skb = iskb;
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), l->drop_point))
			goto drop;
		if (tipc_data_input(l, skb, inputq))
			return rc;
		usr = msg_user(hdr);
		reasm_skb = &l->failover_reasm_skb;
	}

	if (usr == MSG_BUNDLER) {
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb) {
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_link_sync_rcv(node, skb);
		return 0;
	}
drop:
	kfree_skb(skb);
	return 0;
}

static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @link: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *arrvq = &l->deferdq;
	struct sk_buff_head tmpq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt;
	int rc = 0;

	__skb_queue_head_init(&tmpq);

	if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
		if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
			tipc_link_build_proto_msg(l, STATE_MSG, 0,
						  0, 0, 0, xmitq);
		return rc;
	}

	while ((skb = skb_peek(arrvq))) {
		hdr = buf_msg(skb);

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
			__skb_dequeue(arrvq);
			rc = tipc_link_proto_rcv(l, skb, xmitq);
			continue;
		}

		if (unlikely(!link_is_up(l))) {
			rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
			if (!link_is_up(l)) {
				kfree_skb(__skb_dequeue(arrvq));
				goto exit;
			}
		}

		l->silent_intv_cnt = 0;

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer reception if there is a gap in the sequence */
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		if (unlikely(less(rcv_nxt, seqno))) {
			l->stats.deferred_recv++;
			goto exit;
		}

		__skb_dequeue(arrvq);

		/* Drop if packet already received */
		if (unlikely(more(rcv_nxt, seqno))) {
			l->stats.duplicates++;
			kfree_skb(skb);
			goto exit;
		}

		/* Packet can be delivered */
		l->rcv_nxt++;
		l->stats.recv_info++;
		if (unlikely(!tipc_data_input(l, skb, &tmpq)))
			rc = tipc_link_input(l, skb, &tmpq);

		/* Ack at regular intervals */
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
			l->rcv_unacked = 0;
			l->stats.sent_acks++;
			tipc_link_build_proto_msg(l, STATE_MSG,
						  0, 0, 0, 0, xmitq);
		}
	}
exit:
	tipc_skb_queue_splice_tail(&tmpq, l->inputq);
	return rc;
}
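
/* Example: sequence numbers are 16-bit and wrap around, so ordering is
 * decided with the modular helpers less()/more() rather than plain
 * comparison operators. A worked case (illustrative only):
 *
 *	u16 rcv_nxt = 0xfffe, seqno = 0x0001;
 *
 *	// (rcv_nxt < seqno) is false for plain u16 compare, but
 *	// less(rcv_nxt, seqno) is true: 0x0001 is 3 steps ahead of
 *	// 0xfffe in modulo-2^16 arithmetic, i.e. a future packet
 *	// that must be deferred, not a duplicate to be dropped.
 */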

/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
{
	struct sk_buff *skb1;
	u16 seq_no = buf_seqno(skb);

	/* Empty queue ? */
	if (skb_queue_empty(list)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	skb_queue_walk(list, skb1) {
		u16 curr_seqno = buf_seqno(skb1);

		if (seq_no == curr_seqno) {
			kfree_skb(skb);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;
	}

	__skb_queue_before(list, skb1, skb);
	return 1;
}

/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority)
{
	struct sk_buff *skb = NULL;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
				  tolerance, priority, &xmitq);
	skb = __skb_dequeue(&xmitq);
	if (!skb)
		return;
	tipc_bearer_send(l->owner->net, l->bearer_id, skb, l->media_addr);
	l->rcv_unacked = 0;
	kfree_skb(skb);
}

/* tipc_link_build_proto_msg: prepare link protocol message for transmission
 */
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = NULL;
	struct tipc_msg *hdr = l->pmsg;
	u16 snd_nxt = l->snd_nxt;
	u16 rcv_nxt = l->rcv_nxt;
	u16 rcv_last = rcv_nxt - 1;
	int node_up = l->owner->bclink.recv_permitted;

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	msg_set_type(hdr, mtyp);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
	msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);

	/* Compatibility: created msg must not be in sequence with pkt flow */
	msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (!tipc_link_is_up(l))
			return;
		msg_set_next_sent(hdr, snd_nxt);

		/* Override rcvgap if there are packets in deferred queue */
		if (!skb_queue_empty(&l->deferdq))
			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
		if (rcvgap) {
			msg_set_seq_gap(hdr, rcvgap);
			l->stats.sent_nacks++;
		}
		msg_set_ack(hdr, rcv_last);
		msg_set_probe(hdr, probe);
		if (probe)
			l->stats.sent_probes++;
		l->stats.sent_states++;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		msg_set_ack(hdr, l->rcv_nxt - 1);
		msg_set_next_sent(hdr, 1);
	}
	skb = tipc_buf_acquire(msg_size(hdr));
	if (!skb)
		return;
	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
}
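
/* Example: the "compatibility" sequence number set above keeps protocol
 * messages clearly outside the data packet flow. With snd_nxt = 100
 * (illustrative numbers):
 *
 *	msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);
 *	// seqno becomes 100 + 32767 = 32867, half the 16-bit space
 *	// away from the data flow, so an old peer that inspects the
 *	// field can never mistake it for an in-sequence data packet
 */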

/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;

	if (!tnl)
		return;

	skb_queue_head_init(&tnlq);
	skb_queue_head_init(&tmpxq);

	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);
		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;
	}
}
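
/* Example: the wire layout produced by the wrapping loop above. Each
 * original packet, header included, becomes the payload of a tunnel
 * packet carrying an INT_H_SIZE protocol header (illustrative only):
 *
 *	+--------------------+-----------------------------------+
 *	| tunnel hdr         | original packet                   |
 *	| (TUNNEL_PROTOCOL,  | (own header + payload, pktlen     |
 *	|  INT_H_SIZE bytes) |  bytes, copied verbatim)          |
 *	+--------------------+-----------------------------------+
 *
 * msgcnt in the tunnel header tells the peer how many such packets
 * to expect: skb_queue_len(transmq) + skb_queue_len(backlogq).
 */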

/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 nacked_gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	char *if_name;
	int rc = 0;

	if (tipc_link_is_blocked(l))
		goto exit;

	if (link_own_addr(l) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	switch (msg_type(hdr)) {
	case RESET_MSG:

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != WILDCARD_SESSION))
			break;
		/* fall thru' */

	case ACTIVATE_MSG:

		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		if (msg_type(hdr) == RESET_MSG) {
			rc |= tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
		} else if (!link_is_up(l)) {
			tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
			rc |= tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
		}
		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;
		rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
		if (!link_is_up(l))
			break;

		/* Send NACK if peer has sent pkts we haven't received yet */
		if (more(peers_snd_nxt, l->rcv_nxt))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || (msg_probe(hdr)))
			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
						  0, 0, xmitq);
		tipc_link_release_pkts(l, msg_ack(hdr));

		/* If NACK, retransmit will now start at right position */
		if (nacked_gap) {
			rc = tipc_link_retransm(l, nacked_gap, xmitq);
			l->stats.recv_nacks++;
		}

		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}

void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
}
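
/* Example: backlog limits that result from a given window. For
 * win = 50 (illustrative number):
 *
 *	LOW      ->  25  (win / 2)
 *	MEDIUM   ->  50  (win)
 *	HIGH     ->  75  (win / 2 * 3)
 *	CRITICAL -> 100  (win * 2)
 *	SYSTEM   -> max_bulk, derived from MTU and ITEM_SIZE so that
 *	            a full binding table bulk update always fits
 */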

/* tipc_link_find_owner - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_link_find_owner(struct net *net,
					      const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i].link;
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}

/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->snd_nxt;
	l_ptr->stats.recv_info = l_ptr->rcv_nxt;
}

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
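
/* Example: typical use of tipc_nl_parse_link_prop() by a netlink set
 * handler, mirroring tipc_nl_link_set() below (illustrative only):
 *
 *	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 *
 *	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
 *	if (err)
 *		return err;
 *	if (props[TIPC_NLA_PROP_WIN])
 *		tipc_link_set_queue_limits(link,
 *			nla_get_u32(props[TIPC_NLA_PROP_WIN]));
 */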

int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link->tolerance = tol;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
			      struct tipc_link *link, int nlflags)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (tipc_link_is_active(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}

int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}

int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_link_find_owner(net, name, &bearer_id);
		if (!node) {
			/* Don't leak the reply skb on the error path */
			nlmsg_free(msg.skb);
			return -EINVAL;
		}

		tipc_node_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_unlock(node);
			nlmsg_free(msg.skb);
			return -EINVAL;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_unlock(node);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	}

	return genlmsg_reply(msg.skb, info);
}

int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats(net);
		if (err)
			return err;
		return 0;
	}

	node = tipc_link_find_owner(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}