1 /* 2 * net/tipc/link.c: TIPC link code 3 * 4 * Copyright (c) 1996-2007, 2012-2014, Ericsson AB 5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 37 #include "core.h" 38 #include "link.h" 39 #include "port.h" 40 #include "name_distr.h" 41 #include "discover.h" 42 #include "config.h" 43 44 #include <linux/pkt_sched.h> 45 46 /* 47 * Error message prefixes 48 */ 49 static const char *link_co_err = "Link changeover error, "; 50 static const char *link_rst_msg = "Resetting link "; 51 static const char *link_unk_evt = "Unknown link event "; 52 53 /* 54 * Out-of-range value for link session numbers 55 */ 56 #define INVALID_SESSION 0x10000 57 58 /* 59 * Link state events: 60 */ 61 #define STARTING_EVT 856384768 /* link processing trigger */ 62 #define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */ 63 #define TIMEOUT_EVT 560817u /* link timer expired */ 64 65 /* 66 * The following two 'message types' is really just implementation 67 * data conveniently stored in the message header. 68 * They must not be considered part of the protocol 69 */ 70 #define OPEN_MSG 0 71 #define CLOSED_MSG 1 72 73 /* 74 * State value stored in 'exp_msg_count' 75 */ 76 #define START_CHANGEOVER 100000u 77 78 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, 79 struct sk_buff *buf); 80 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf); 81 static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr, 82 struct sk_buff **buf); 83 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance); 84 static int link_send_sections_long(struct tipc_port *sender, 85 struct iovec const *msg_sect, 86 unsigned int len, u32 destnode); 87 static void link_state_event(struct tipc_link *l_ptr, u32 event); 88 static void link_reset_statistics(struct tipc_link *l_ptr); 89 static void link_print(struct tipc_link *l_ptr, const char *str); 90 static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf); 91 static void tipc_link_send_sync(struct tipc_link *l); 92 static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf); 93 94 /* 95 * Simple link routines 96 */ 97 static unsigned int align(unsigned int i) 98 { 99 return (i + 3) & ~3u; 100 } 101 102 static void link_init_max_pkt(struct tipc_link *l_ptr) 103 { 104 u32 max_pkt; 105 106 max_pkt = (l_ptr->b_ptr->mtu & ~3); 107 if (max_pkt > MAX_MSG_SIZE) 108 max_pkt = MAX_MSG_SIZE; 109 110 l_ptr->max_pkt_target = max_pkt; 111 if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT) 112 l_ptr->max_pkt = l_ptr->max_pkt_target; 113 else 114 l_ptr->max_pkt = MAX_PKT_DEFAULT; 115 116 l_ptr->max_pkt_probes = 0; 117 } 118 119 static u32 link_next_sent(struct tipc_link *l_ptr) 120 { 121 if (l_ptr->next_out) 122 return buf_seqno(l_ptr->next_out); 123 return mod(l_ptr->next_out_no); 124 } 125 126 static u32 link_last_sent(struct tipc_link *l_ptr) 127 { 128 return mod(link_next_sent(l_ptr) - 1); 129 } 130 131 /* 132 * Simple non-static link routines (i.e. referenced outside this file) 133 */ 134 int tipc_link_is_up(struct tipc_link *l_ptr) 135 { 136 if (!l_ptr) 137 return 0; 138 return link_working_working(l_ptr) || link_working_unknown(l_ptr); 139 } 140 141 int tipc_link_is_active(struct tipc_link *l_ptr) 142 { 143 return (l_ptr->owner->active_links[0] == l_ptr) || 144 (l_ptr->owner->active_links[1] == l_ptr); 145 } 146 147 /** 148 * link_timeout - handle expiration of link timer 149 * @l_ptr: pointer to link 150 * 151 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict 152 * with tipc_link_delete(). (There is no risk that the node will be deleted by 153 * another thread because tipc_link_delete() always cancels the link timer before 154 * tipc_node_delete() is called.) 155 */ 156 static void link_timeout(struct tipc_link *l_ptr) 157 { 158 tipc_node_lock(l_ptr->owner); 159 160 /* update counters used in statistical profiling of send traffic */ 161 l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size; 162 l_ptr->stats.queue_sz_counts++; 163 164 if (l_ptr->first_out) { 165 struct tipc_msg *msg = buf_msg(l_ptr->first_out); 166 u32 length = msg_size(msg); 167 168 if ((msg_user(msg) == MSG_FRAGMENTER) && 169 (msg_type(msg) == FIRST_FRAGMENT)) { 170 length = msg_size(msg_get_wrapped(msg)); 171 } 172 if (length) { 173 l_ptr->stats.msg_lengths_total += length; 174 l_ptr->stats.msg_length_counts++; 175 if (length <= 64) 176 l_ptr->stats.msg_length_profile[0]++; 177 else if (length <= 256) 178 l_ptr->stats.msg_length_profile[1]++; 179 else if (length <= 1024) 180 l_ptr->stats.msg_length_profile[2]++; 181 else if (length <= 4096) 182 l_ptr->stats.msg_length_profile[3]++; 183 else if (length <= 16384) 184 l_ptr->stats.msg_length_profile[4]++; 185 else if (length <= 32768) 186 l_ptr->stats.msg_length_profile[5]++; 187 else 188 l_ptr->stats.msg_length_profile[6]++; 189 } 190 } 191 192 /* do all other link processing performed on a periodic basis */ 193 194 link_state_event(l_ptr, TIMEOUT_EVT); 195 196 if (l_ptr->next_out) 197 tipc_link_push_queue(l_ptr); 198 199 tipc_node_unlock(l_ptr->owner); 200 } 201 202 static void link_set_timer(struct tipc_link *l_ptr, u32 time) 203 { 204 k_start_timer(&l_ptr->timer, time); 205 } 206 207 /** 208 * tipc_link_create - create a new link 209 * @n_ptr: pointer to associated node 210 * @b_ptr: pointer to associated bearer 211 * @media_addr: media address to use when sending messages over link 212 * 213 * Returns pointer to link. 214 */ 215 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, 216 struct tipc_bearer *b_ptr, 217 const struct tipc_media_addr *media_addr) 218 { 219 struct tipc_link *l_ptr; 220 struct tipc_msg *msg; 221 char *if_name; 222 char addr_string[16]; 223 u32 peer = n_ptr->addr; 224 225 if (n_ptr->link_cnt >= 2) { 226 tipc_addr_string_fill(addr_string, n_ptr->addr); 227 pr_err("Attempt to establish third link to %s\n", addr_string); 228 return NULL; 229 } 230 231 if (n_ptr->links[b_ptr->identity]) { 232 tipc_addr_string_fill(addr_string, n_ptr->addr); 233 pr_err("Attempt to establish second link on <%s> to %s\n", 234 b_ptr->name, addr_string); 235 return NULL; 236 } 237 238 l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC); 239 if (!l_ptr) { 240 pr_warn("Link creation failed, no memory\n"); 241 return NULL; 242 } 243 244 l_ptr->addr = peer; 245 if_name = strchr(b_ptr->name, ':') + 1; 246 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown", 247 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr), 248 tipc_node(tipc_own_addr), 249 if_name, 250 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer)); 251 /* note: peer i/f name is updated by reset/activate message */ 252 memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr)); 253 l_ptr->owner = n_ptr; 254 l_ptr->checkpoint = 1; 255 l_ptr->peer_session = INVALID_SESSION; 256 l_ptr->b_ptr = b_ptr; 257 link_set_supervision_props(l_ptr, b_ptr->tolerance); 258 l_ptr->state = RESET_UNKNOWN; 259 260 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg; 261 msg = l_ptr->pmsg; 262 tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr); 263 msg_set_size(msg, sizeof(l_ptr->proto_msg)); 264 msg_set_session(msg, (tipc_random & 0xffff)); 265 msg_set_bearer_id(msg, b_ptr->identity); 266 strcpy((char *)msg_data(msg), if_name); 267 268 l_ptr->priority = b_ptr->priority; 269 tipc_link_set_queue_limits(l_ptr, b_ptr->window); 270 271 link_init_max_pkt(l_ptr); 272 273 l_ptr->next_out_no = 1; 274 INIT_LIST_HEAD(&l_ptr->waiting_ports); 275 276 link_reset_statistics(l_ptr); 277 278 tipc_node_attach_link(n_ptr, l_ptr); 279 280 k_init_timer(&l_ptr->timer, (Handler)link_timeout, 281 (unsigned long)l_ptr); 282 list_add_tail(&l_ptr->link_list, &b_ptr->links); 283 284 link_state_event(l_ptr, STARTING_EVT); 285 286 return l_ptr; 287 } 288 289 /** 290 * tipc_link_delete - delete a link 291 * @l_ptr: pointer to link 292 * 293 * Note: 'tipc_net_lock' is write_locked, bearer is locked. 294 * This routine must not grab the node lock until after link timer cancellation 295 * to avoid a potential deadlock situation. 296 */ 297 void tipc_link_delete(struct tipc_link *l_ptr) 298 { 299 if (!l_ptr) { 300 pr_err("Attempt to delete non-existent link\n"); 301 return; 302 } 303 304 k_cancel_timer(&l_ptr->timer); 305 306 tipc_node_lock(l_ptr->owner); 307 tipc_link_reset(l_ptr); 308 tipc_node_detach_link(l_ptr->owner, l_ptr); 309 tipc_link_purge_queues(l_ptr); 310 list_del_init(&l_ptr->link_list); 311 tipc_node_unlock(l_ptr->owner); 312 k_term_timer(&l_ptr->timer); 313 kfree(l_ptr); 314 } 315 316 317 /** 318 * link_schedule_port - schedule port for deferred sending 319 * @l_ptr: pointer to link 320 * @origport: reference to sending port 321 * @sz: amount of data to be sent 322 * 323 * Schedules port for renewed sending of messages after link congestion 324 * has abated. 325 */ 326 static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz) 327 { 328 struct tipc_port *p_ptr; 329 330 spin_lock_bh(&tipc_port_list_lock); 331 p_ptr = tipc_port_lock(origport); 332 if (p_ptr) { 333 if (!p_ptr->wakeup) 334 goto exit; 335 if (!list_empty(&p_ptr->wait_list)) 336 goto exit; 337 p_ptr->congested = 1; 338 p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt); 339 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports); 340 l_ptr->stats.link_congs++; 341 exit: 342 tipc_port_unlock(p_ptr); 343 } 344 spin_unlock_bh(&tipc_port_list_lock); 345 return -ELINKCONG; 346 } 347 348 void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all) 349 { 350 struct tipc_port *p_ptr; 351 struct tipc_port *temp_p_ptr; 352 int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size; 353 354 if (all) 355 win = 100000; 356 if (win <= 0) 357 return; 358 if (!spin_trylock_bh(&tipc_port_list_lock)) 359 return; 360 if (link_congested(l_ptr)) 361 goto exit; 362 list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports, 363 wait_list) { 364 if (win <= 0) 365 break; 366 list_del_init(&p_ptr->wait_list); 367 spin_lock_bh(p_ptr->lock); 368 p_ptr->congested = 0; 369 p_ptr->wakeup(p_ptr); 370 win -= p_ptr->waiting_pkts; 371 spin_unlock_bh(p_ptr->lock); 372 } 373 374 exit: 375 spin_unlock_bh(&tipc_port_list_lock); 376 } 377 378 /** 379 * link_release_outqueue - purge link's outbound message queue 380 * @l_ptr: pointer to link 381 */ 382 static void link_release_outqueue(struct tipc_link *l_ptr) 383 { 384 kfree_skb_list(l_ptr->first_out); 385 l_ptr->first_out = NULL; 386 l_ptr->out_queue_size = 0; 387 } 388 389 /** 390 * tipc_link_reset_fragments - purge link's inbound message fragments queue 391 * @l_ptr: pointer to link 392 */ 393 void tipc_link_reset_fragments(struct tipc_link *l_ptr) 394 { 395 kfree_skb(l_ptr->reasm_head); 396 l_ptr->reasm_head = NULL; 397 l_ptr->reasm_tail = NULL; 398 } 399 400 /** 401 * tipc_link_purge_queues - purge all pkt queues associated with link 402 * @l_ptr: pointer to link 403 */ 404 void tipc_link_purge_queues(struct tipc_link *l_ptr) 405 { 406 kfree_skb_list(l_ptr->oldest_deferred_in); 407 kfree_skb_list(l_ptr->first_out); 408 tipc_link_reset_fragments(l_ptr); 409 kfree_skb(l_ptr->proto_msg_queue); 410 l_ptr->proto_msg_queue = NULL; 411 } 412 413 void tipc_link_reset(struct tipc_link *l_ptr) 414 { 415 u32 prev_state = l_ptr->state; 416 u32 checkpoint = l_ptr->next_in_no; 417 int was_active_link = tipc_link_is_active(l_ptr); 418 419 msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff)); 420 421 /* Link is down, accept any session */ 422 l_ptr->peer_session = INVALID_SESSION; 423 424 /* Prepare for max packet size negotiation */ 425 link_init_max_pkt(l_ptr); 426 427 l_ptr->state = RESET_UNKNOWN; 428 429 if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET)) 430 return; 431 432 tipc_node_link_down(l_ptr->owner, l_ptr); 433 tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr); 434 435 if (was_active_link && tipc_node_active_links(l_ptr->owner)) { 436 l_ptr->reset_checkpoint = checkpoint; 437 l_ptr->exp_msg_count = START_CHANGEOVER; 438 } 439 440 /* Clean up all queues: */ 441 link_release_outqueue(l_ptr); 442 kfree_skb(l_ptr->proto_msg_queue); 443 l_ptr->proto_msg_queue = NULL; 444 kfree_skb_list(l_ptr->oldest_deferred_in); 445 if (!list_empty(&l_ptr->waiting_ports)) 446 tipc_link_wakeup_ports(l_ptr, 1); 447 448 l_ptr->retransm_queue_head = 0; 449 l_ptr->retransm_queue_size = 0; 450 l_ptr->last_out = NULL; 451 l_ptr->first_out = NULL; 452 l_ptr->next_out = NULL; 453 l_ptr->unacked_window = 0; 454 l_ptr->checkpoint = 1; 455 l_ptr->next_out_no = 1; 456 l_ptr->deferred_inqueue_sz = 0; 457 l_ptr->oldest_deferred_in = NULL; 458 l_ptr->newest_deferred_in = NULL; 459 l_ptr->fsm_msg_cnt = 0; 460 l_ptr->stale_count = 0; 461 link_reset_statistics(l_ptr); 462 } 463 464 465 static void link_activate(struct tipc_link *l_ptr) 466 { 467 l_ptr->next_in_no = l_ptr->stats.recv_info = 1; 468 tipc_node_link_up(l_ptr->owner, l_ptr); 469 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr); 470 } 471 472 /** 473 * link_state_event - link finite state machine 474 * @l_ptr: pointer to link 475 * @event: state machine event to process 476 */ 477 static void link_state_event(struct tipc_link *l_ptr, unsigned int event) 478 { 479 struct tipc_link *other; 480 u32 cont_intv = l_ptr->continuity_interval; 481 482 if (!l_ptr->started && (event != STARTING_EVT)) 483 return; /* Not yet. */ 484 485 /* Check whether changeover is going on */ 486 if (l_ptr->exp_msg_count) { 487 if (event == TIMEOUT_EVT) 488 link_set_timer(l_ptr, cont_intv); 489 return; 490 } 491 492 switch (l_ptr->state) { 493 case WORKING_WORKING: 494 switch (event) { 495 case TRAFFIC_MSG_EVT: 496 case ACTIVATE_MSG: 497 break; 498 case TIMEOUT_EVT: 499 if (l_ptr->next_in_no != l_ptr->checkpoint) { 500 l_ptr->checkpoint = l_ptr->next_in_no; 501 if (tipc_bclink_acks_missing(l_ptr->owner)) { 502 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 503 0, 0, 0, 0, 0); 504 l_ptr->fsm_msg_cnt++; 505 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) { 506 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 507 1, 0, 0, 0, 0); 508 l_ptr->fsm_msg_cnt++; 509 } 510 link_set_timer(l_ptr, cont_intv); 511 break; 512 } 513 l_ptr->state = WORKING_UNKNOWN; 514 l_ptr->fsm_msg_cnt = 0; 515 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); 516 l_ptr->fsm_msg_cnt++; 517 link_set_timer(l_ptr, cont_intv / 4); 518 break; 519 case RESET_MSG: 520 pr_info("%s<%s>, requested by peer\n", link_rst_msg, 521 l_ptr->name); 522 tipc_link_reset(l_ptr); 523 l_ptr->state = RESET_RESET; 524 l_ptr->fsm_msg_cnt = 0; 525 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0); 526 l_ptr->fsm_msg_cnt++; 527 link_set_timer(l_ptr, cont_intv); 528 break; 529 default: 530 pr_err("%s%u in WW state\n", link_unk_evt, event); 531 } 532 break; 533 case WORKING_UNKNOWN: 534 switch (event) { 535 case TRAFFIC_MSG_EVT: 536 case ACTIVATE_MSG: 537 l_ptr->state = WORKING_WORKING; 538 l_ptr->fsm_msg_cnt = 0; 539 link_set_timer(l_ptr, cont_intv); 540 break; 541 case RESET_MSG: 542 pr_info("%s<%s>, requested by peer while probing\n", 543 link_rst_msg, l_ptr->name); 544 tipc_link_reset(l_ptr); 545 l_ptr->state = RESET_RESET; 546 l_ptr->fsm_msg_cnt = 0; 547 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0); 548 l_ptr->fsm_msg_cnt++; 549 link_set_timer(l_ptr, cont_intv); 550 break; 551 case TIMEOUT_EVT: 552 if (l_ptr->next_in_no != l_ptr->checkpoint) { 553 l_ptr->state = WORKING_WORKING; 554 l_ptr->fsm_msg_cnt = 0; 555 l_ptr->checkpoint = l_ptr->next_in_no; 556 if (tipc_bclink_acks_missing(l_ptr->owner)) { 557 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 558 0, 0, 0, 0, 0); 559 l_ptr->fsm_msg_cnt++; 560 } 561 link_set_timer(l_ptr, cont_intv); 562 } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) { 563 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 564 1, 0, 0, 0, 0); 565 l_ptr->fsm_msg_cnt++; 566 link_set_timer(l_ptr, cont_intv / 4); 567 } else { /* Link has failed */ 568 pr_warn("%s<%s>, peer not responding\n", 569 link_rst_msg, l_ptr->name); 570 tipc_link_reset(l_ptr); 571 l_ptr->state = RESET_UNKNOWN; 572 l_ptr->fsm_msg_cnt = 0; 573 tipc_link_send_proto_msg(l_ptr, RESET_MSG, 574 0, 0, 0, 0, 0); 575 l_ptr->fsm_msg_cnt++; 576 link_set_timer(l_ptr, cont_intv); 577 } 578 break; 579 default: 580 pr_err("%s%u in WU state\n", link_unk_evt, event); 581 } 582 break; 583 case RESET_UNKNOWN: 584 switch (event) { 585 case TRAFFIC_MSG_EVT: 586 break; 587 case ACTIVATE_MSG: 588 other = l_ptr->owner->active_links[0]; 589 if (other && link_working_unknown(other)) 590 break; 591 l_ptr->state = WORKING_WORKING; 592 l_ptr->fsm_msg_cnt = 0; 593 link_activate(l_ptr); 594 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); 595 l_ptr->fsm_msg_cnt++; 596 if (l_ptr->owner->working_links == 1) 597 tipc_link_send_sync(l_ptr); 598 link_set_timer(l_ptr, cont_intv); 599 break; 600 case RESET_MSG: 601 l_ptr->state = RESET_RESET; 602 l_ptr->fsm_msg_cnt = 0; 603 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0); 604 l_ptr->fsm_msg_cnt++; 605 link_set_timer(l_ptr, cont_intv); 606 break; 607 case STARTING_EVT: 608 l_ptr->started = 1; 609 /* fall through */ 610 case TIMEOUT_EVT: 611 tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0); 612 l_ptr->fsm_msg_cnt++; 613 link_set_timer(l_ptr, cont_intv); 614 break; 615 default: 616 pr_err("%s%u in RU state\n", link_unk_evt, event); 617 } 618 break; 619 case RESET_RESET: 620 switch (event) { 621 case TRAFFIC_MSG_EVT: 622 case ACTIVATE_MSG: 623 other = l_ptr->owner->active_links[0]; 624 if (other && link_working_unknown(other)) 625 break; 626 l_ptr->state = WORKING_WORKING; 627 l_ptr->fsm_msg_cnt = 0; 628 link_activate(l_ptr); 629 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); 630 l_ptr->fsm_msg_cnt++; 631 if (l_ptr->owner->working_links == 1) 632 tipc_link_send_sync(l_ptr); 633 link_set_timer(l_ptr, cont_intv); 634 break; 635 case RESET_MSG: 636 break; 637 case TIMEOUT_EVT: 638 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0); 639 l_ptr->fsm_msg_cnt++; 640 link_set_timer(l_ptr, cont_intv); 641 break; 642 default: 643 pr_err("%s%u in RR state\n", link_unk_evt, event); 644 } 645 break; 646 default: 647 pr_err("Unknown link state %u/%u\n", l_ptr->state, event); 648 } 649 } 650 651 /* 652 * link_bundle_buf(): Append contents of a buffer to 653 * the tail of an existing one. 654 */ 655 static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler, 656 struct sk_buff *buf) 657 { 658 struct tipc_msg *bundler_msg = buf_msg(bundler); 659 struct tipc_msg *msg = buf_msg(buf); 660 u32 size = msg_size(msg); 661 u32 bundle_size = msg_size(bundler_msg); 662 u32 to_pos = align(bundle_size); 663 u32 pad = to_pos - bundle_size; 664 665 if (msg_user(bundler_msg) != MSG_BUNDLER) 666 return 0; 667 if (msg_type(bundler_msg) != OPEN_MSG) 668 return 0; 669 if (skb_tailroom(bundler) < (pad + size)) 670 return 0; 671 if (l_ptr->max_pkt < (to_pos + size)) 672 return 0; 673 674 skb_put(bundler, pad + size); 675 skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size); 676 msg_set_size(bundler_msg, to_pos + size); 677 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1); 678 kfree_skb(buf); 679 l_ptr->stats.sent_bundled++; 680 return 1; 681 } 682 683 static void link_add_to_outqueue(struct tipc_link *l_ptr, 684 struct sk_buff *buf, 685 struct tipc_msg *msg) 686 { 687 u32 ack = mod(l_ptr->next_in_no - 1); 688 u32 seqno = mod(l_ptr->next_out_no++); 689 690 msg_set_word(msg, 2, ((ack << 16) | seqno)); 691 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 692 buf->next = NULL; 693 if (l_ptr->first_out) { 694 l_ptr->last_out->next = buf; 695 l_ptr->last_out = buf; 696 } else 697 l_ptr->first_out = l_ptr->last_out = buf; 698 699 l_ptr->out_queue_size++; 700 if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz) 701 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size; 702 } 703 704 static void link_add_chain_to_outqueue(struct tipc_link *l_ptr, 705 struct sk_buff *buf_chain, 706 u32 long_msgno) 707 { 708 struct sk_buff *buf; 709 struct tipc_msg *msg; 710 711 if (!l_ptr->next_out) 712 l_ptr->next_out = buf_chain; 713 while (buf_chain) { 714 buf = buf_chain; 715 buf_chain = buf_chain->next; 716 717 msg = buf_msg(buf); 718 msg_set_long_msgno(msg, long_msgno); 719 link_add_to_outqueue(l_ptr, buf, msg); 720 } 721 } 722 723 /* 724 * tipc_link_send_buf() is the 'full path' for messages, called from 725 * inside TIPC when the 'fast path' in tipc_send_buf 726 * has failed, and from link_send() 727 */ 728 int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) 729 { 730 struct tipc_msg *msg = buf_msg(buf); 731 u32 size = msg_size(msg); 732 u32 dsz = msg_data_sz(msg); 733 u32 queue_size = l_ptr->out_queue_size; 734 u32 imp = tipc_msg_tot_importance(msg); 735 u32 queue_limit = l_ptr->queue_limit[imp]; 736 u32 max_packet = l_ptr->max_pkt; 737 738 /* Match msg importance against queue limits: */ 739 if (unlikely(queue_size >= queue_limit)) { 740 if (imp <= TIPC_CRITICAL_IMPORTANCE) { 741 link_schedule_port(l_ptr, msg_origport(msg), size); 742 kfree_skb(buf); 743 return -ELINKCONG; 744 } 745 kfree_skb(buf); 746 if (imp > CONN_MANAGER) { 747 pr_warn("%s<%s>, send queue full", link_rst_msg, 748 l_ptr->name); 749 tipc_link_reset(l_ptr); 750 } 751 return dsz; 752 } 753 754 /* Fragmentation needed ? */ 755 if (size > max_packet) 756 return link_send_long_buf(l_ptr, buf); 757 758 /* Packet can be queued or sent. */ 759 if (likely(!link_congested(l_ptr))) { 760 link_add_to_outqueue(l_ptr, buf, msg); 761 762 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 763 l_ptr->unacked_window = 0; 764 return dsz; 765 } 766 /* Congestion: can message be bundled ? */ 767 if ((msg_user(msg) != CHANGEOVER_PROTOCOL) && 768 (msg_user(msg) != MSG_FRAGMENTER)) { 769 770 /* Try adding message to an existing bundle */ 771 if (l_ptr->next_out && 772 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) 773 return dsz; 774 775 /* Try creating a new bundle */ 776 if (size <= max_packet * 2 / 3) { 777 struct sk_buff *bundler = tipc_buf_acquire(max_packet); 778 struct tipc_msg bundler_hdr; 779 780 if (bundler) { 781 tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG, 782 INT_H_SIZE, l_ptr->addr); 783 skb_copy_to_linear_data(bundler, &bundler_hdr, 784 INT_H_SIZE); 785 skb_trim(bundler, INT_H_SIZE); 786 link_bundle_buf(l_ptr, bundler, buf); 787 buf = bundler; 788 msg = buf_msg(buf); 789 l_ptr->stats.sent_bundles++; 790 } 791 } 792 } 793 if (!l_ptr->next_out) 794 l_ptr->next_out = buf; 795 link_add_to_outqueue(l_ptr, buf, msg); 796 return dsz; 797 } 798 799 /* 800 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has 801 * not been selected yet, and the the owner node is not locked 802 * Called by TIPC internal users, e.g. the name distributor 803 */ 804 int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector) 805 { 806 struct tipc_link *l_ptr; 807 struct tipc_node *n_ptr; 808 int res = -ELINKCONG; 809 810 read_lock_bh(&tipc_net_lock); 811 n_ptr = tipc_node_find(dest); 812 if (n_ptr) { 813 tipc_node_lock(n_ptr); 814 l_ptr = n_ptr->active_links[selector & 1]; 815 if (l_ptr) 816 res = tipc_link_send_buf(l_ptr, buf); 817 else 818 kfree_skb(buf); 819 tipc_node_unlock(n_ptr); 820 } else { 821 kfree_skb(buf); 822 } 823 read_unlock_bh(&tipc_net_lock); 824 return res; 825 } 826 827 /* 828 * tipc_link_send_sync - synchronize broadcast link endpoints. 829 * 830 * Give a newly added peer node the sequence number where it should 831 * start receiving and acking broadcast packets. 832 * 833 * Called with node locked 834 */ 835 static void tipc_link_send_sync(struct tipc_link *l) 836 { 837 struct sk_buff *buf; 838 struct tipc_msg *msg; 839 840 buf = tipc_buf_acquire(INT_H_SIZE); 841 if (!buf) 842 return; 843 844 msg = buf_msg(buf); 845 tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr); 846 msg_set_last_bcast(msg, l->owner->bclink.acked); 847 link_add_chain_to_outqueue(l, buf, 0); 848 tipc_link_push_queue(l); 849 } 850 851 /* 852 * tipc_link_recv_sync - synchronize broadcast link endpoints. 853 * Receive the sequence number where we should start receiving and 854 * acking broadcast packets from a newly added peer node, and open 855 * up for reception of such packets. 856 * 857 * Called with node locked 858 */ 859 static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf) 860 { 861 struct tipc_msg *msg = buf_msg(buf); 862 863 n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg); 864 n->bclink.recv_permitted = true; 865 kfree_skb(buf); 866 } 867 868 /* 869 * tipc_link_send_names - send name table entries to new neighbor 870 * 871 * Send routine for bulk delivery of name table messages when contact 872 * with a new neighbor occurs. No link congestion checking is performed 873 * because name table messages *must* be delivered. The messages must be 874 * small enough not to require fragmentation. 875 * Called without any locks held. 876 */ 877 void tipc_link_send_names(struct list_head *message_list, u32 dest) 878 { 879 struct tipc_node *n_ptr; 880 struct tipc_link *l_ptr; 881 struct sk_buff *buf; 882 struct sk_buff *temp_buf; 883 884 if (list_empty(message_list)) 885 return; 886 887 read_lock_bh(&tipc_net_lock); 888 n_ptr = tipc_node_find(dest); 889 if (n_ptr) { 890 tipc_node_lock(n_ptr); 891 l_ptr = n_ptr->active_links[0]; 892 if (l_ptr) { 893 /* convert circular list to linear list */ 894 ((struct sk_buff *)message_list->prev)->next = NULL; 895 link_add_chain_to_outqueue(l_ptr, 896 (struct sk_buff *)message_list->next, 0); 897 tipc_link_push_queue(l_ptr); 898 INIT_LIST_HEAD(message_list); 899 } 900 tipc_node_unlock(n_ptr); 901 } 902 read_unlock_bh(&tipc_net_lock); 903 904 /* discard the messages if they couldn't be sent */ 905 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) { 906 list_del((struct list_head *)buf); 907 kfree_skb(buf); 908 } 909 } 910 911 /* 912 * link_send_buf_fast: Entry for data messages where the 913 * destination link is known and the header is complete, 914 * inclusive total message length. Very time critical. 915 * Link is locked. Returns user data length. 916 */ 917 static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf, 918 u32 *used_max_pkt) 919 { 920 struct tipc_msg *msg = buf_msg(buf); 921 int res = msg_data_sz(msg); 922 923 if (likely(!link_congested(l_ptr))) { 924 if (likely(msg_size(msg) <= l_ptr->max_pkt)) { 925 link_add_to_outqueue(l_ptr, buf, msg); 926 tipc_bearer_send(l_ptr->b_ptr, buf, 927 &l_ptr->media_addr); 928 l_ptr->unacked_window = 0; 929 return res; 930 } 931 else 932 *used_max_pkt = l_ptr->max_pkt; 933 } 934 return tipc_link_send_buf(l_ptr, buf); /* All other cases */ 935 } 936 937 /* 938 * tipc_link_send_sections_fast: Entry for messages where the 939 * destination processor is known and the header is complete, 940 * except for total message length. 941 * Returns user data length or errno. 942 */ 943 int tipc_link_send_sections_fast(struct tipc_port *sender, 944 struct iovec const *msg_sect, 945 unsigned int len, u32 destaddr) 946 { 947 struct tipc_msg *hdr = &sender->phdr; 948 struct tipc_link *l_ptr; 949 struct sk_buff *buf; 950 struct tipc_node *node; 951 int res; 952 u32 selector = msg_origport(hdr) & 1; 953 954 again: 955 /* 956 * Try building message using port's max_pkt hint. 957 * (Must not hold any locks while building message.) 958 */ 959 res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf); 960 /* Exit if build request was invalid */ 961 if (unlikely(res < 0)) 962 return res; 963 964 read_lock_bh(&tipc_net_lock); 965 node = tipc_node_find(destaddr); 966 if (likely(node)) { 967 tipc_node_lock(node); 968 l_ptr = node->active_links[selector]; 969 if (likely(l_ptr)) { 970 if (likely(buf)) { 971 res = link_send_buf_fast(l_ptr, buf, 972 &sender->max_pkt); 973 exit: 974 tipc_node_unlock(node); 975 read_unlock_bh(&tipc_net_lock); 976 return res; 977 } 978 979 /* Exit if link (or bearer) is congested */ 980 if (link_congested(l_ptr)) { 981 res = link_schedule_port(l_ptr, 982 sender->ref, res); 983 goto exit; 984 } 985 986 /* 987 * Message size exceeds max_pkt hint; update hint, 988 * then re-try fast path or fragment the message 989 */ 990 sender->max_pkt = l_ptr->max_pkt; 991 tipc_node_unlock(node); 992 read_unlock_bh(&tipc_net_lock); 993 994 995 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt) 996 goto again; 997 998 return link_send_sections_long(sender, msg_sect, len, 999 destaddr); 1000 } 1001 tipc_node_unlock(node); 1002 } 1003 read_unlock_bh(&tipc_net_lock); 1004 1005 /* Couldn't find a link to the destination node */ 1006 if (buf) 1007 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE); 1008 if (res >= 0) 1009 return tipc_port_reject_sections(sender, hdr, msg_sect, 1010 len, TIPC_ERR_NO_NODE); 1011 return res; 1012 } 1013 1014 /* 1015 * link_send_sections_long(): Entry for long messages where the 1016 * destination node is known and the header is complete, 1017 * inclusive total message length. 1018 * Link and bearer congestion status have been checked to be ok, 1019 * and are ignored if they change. 1020 * 1021 * Note that fragments do not use the full link MTU so that they won't have 1022 * to undergo refragmentation if link changeover causes them to be sent 1023 * over another link with an additional tunnel header added as prefix. 1024 * (Refragmentation will still occur if the other link has a smaller MTU.) 1025 * 1026 * Returns user data length or errno. 1027 */ 1028 static int link_send_sections_long(struct tipc_port *sender, 1029 struct iovec const *msg_sect, 1030 unsigned int len, u32 destaddr) 1031 { 1032 struct tipc_link *l_ptr; 1033 struct tipc_node *node; 1034 struct tipc_msg *hdr = &sender->phdr; 1035 u32 dsz = len; 1036 u32 max_pkt, fragm_sz, rest; 1037 struct tipc_msg fragm_hdr; 1038 struct sk_buff *buf, *buf_chain, *prev; 1039 u32 fragm_crs, fragm_rest, hsz, sect_rest; 1040 const unchar __user *sect_crs; 1041 int curr_sect; 1042 u32 fragm_no; 1043 int res = 0; 1044 1045 again: 1046 fragm_no = 1; 1047 max_pkt = sender->max_pkt - INT_H_SIZE; 1048 /* leave room for tunnel header in case of link changeover */ 1049 fragm_sz = max_pkt - INT_H_SIZE; 1050 /* leave room for fragmentation header in each fragment */ 1051 rest = dsz; 1052 fragm_crs = 0; 1053 fragm_rest = 0; 1054 sect_rest = 0; 1055 sect_crs = NULL; 1056 curr_sect = -1; 1057 1058 /* Prepare reusable fragment header */ 1059 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 1060 INT_H_SIZE, msg_destnode(hdr)); 1061 msg_set_size(&fragm_hdr, max_pkt); 1062 msg_set_fragm_no(&fragm_hdr, 1); 1063 1064 /* Prepare header of first fragment */ 1065 buf_chain = buf = tipc_buf_acquire(max_pkt); 1066 if (!buf) 1067 return -ENOMEM; 1068 buf->next = NULL; 1069 skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE); 1070 hsz = msg_hdr_sz(hdr); 1071 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz); 1072 1073 /* Chop up message */ 1074 fragm_crs = INT_H_SIZE + hsz; 1075 fragm_rest = fragm_sz - hsz; 1076 1077 do { /* For all sections */ 1078 u32 sz; 1079 1080 if (!sect_rest) { 1081 sect_rest = msg_sect[++curr_sect].iov_len; 1082 sect_crs = msg_sect[curr_sect].iov_base; 1083 } 1084 1085 if (sect_rest < fragm_rest) 1086 sz = sect_rest; 1087 else 1088 sz = fragm_rest; 1089 1090 if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) { 1091 res = -EFAULT; 1092 error: 1093 kfree_skb_list(buf_chain); 1094 return res; 1095 } 1096 sect_crs += sz; 1097 sect_rest -= sz; 1098 fragm_crs += sz; 1099 fragm_rest -= sz; 1100 rest -= sz; 1101 1102 if (!fragm_rest && rest) { 1103 1104 /* Initiate new fragment: */ 1105 if (rest <= fragm_sz) { 1106 fragm_sz = rest; 1107 msg_set_type(&fragm_hdr, LAST_FRAGMENT); 1108 } else { 1109 msg_set_type(&fragm_hdr, FRAGMENT); 1110 } 1111 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE); 1112 msg_set_fragm_no(&fragm_hdr, ++fragm_no); 1113 prev = buf; 1114 buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE); 1115 if (!buf) { 1116 res = -ENOMEM; 1117 goto error; 1118 } 1119 1120 buf->next = NULL; 1121 prev->next = buf; 1122 skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE); 1123 fragm_crs = INT_H_SIZE; 1124 fragm_rest = fragm_sz; 1125 } 1126 } while (rest > 0); 1127 1128 /* 1129 * Now we have a buffer chain. Select a link and check 1130 * that packet size is still OK 1131 */ 1132 node = tipc_node_find(destaddr); 1133 if (likely(node)) { 1134 tipc_node_lock(node); 1135 l_ptr = node->active_links[sender->ref & 1]; 1136 if (!l_ptr) { 1137 tipc_node_unlock(node); 1138 goto reject; 1139 } 1140 if (l_ptr->max_pkt < max_pkt) { 1141 sender->max_pkt = l_ptr->max_pkt; 1142 tipc_node_unlock(node); 1143 kfree_skb_list(buf_chain); 1144 goto again; 1145 } 1146 } else { 1147 reject: 1148 kfree_skb_list(buf_chain); 1149 return tipc_port_reject_sections(sender, hdr, msg_sect, 1150 len, TIPC_ERR_NO_NODE); 1151 } 1152 1153 /* Append chain of fragments to send queue & send them */ 1154 l_ptr->long_msg_seq_no++; 1155 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no); 1156 l_ptr->stats.sent_fragments += fragm_no; 1157 l_ptr->stats.sent_fragmented++; 1158 tipc_link_push_queue(l_ptr); 1159 tipc_node_unlock(node); 1160 return dsz; 1161 } 1162 1163 /* 1164 * tipc_link_push_packet: Push one unsent packet to the media 1165 */ 1166 static u32 tipc_link_push_packet(struct tipc_link *l_ptr) 1167 { 1168 struct sk_buff *buf = l_ptr->first_out; 1169 u32 r_q_size = l_ptr->retransm_queue_size; 1170 u32 r_q_head = l_ptr->retransm_queue_head; 1171 1172 /* Step to position where retransmission failed, if any, */ 1173 /* consider that buffers may have been released in meantime */ 1174 if (r_q_size && buf) { 1175 u32 last = lesser(mod(r_q_head + r_q_size), 1176 link_last_sent(l_ptr)); 1177 u32 first = buf_seqno(buf); 1178 1179 while (buf && less(first, r_q_head)) { 1180 first = mod(first + 1); 1181 buf = buf->next; 1182 } 1183 l_ptr->retransm_queue_head = r_q_head = first; 1184 l_ptr->retransm_queue_size = r_q_size = mod(last - first); 1185 } 1186 1187 /* Continue retransmission now, if there is anything: */ 1188 if (r_q_size && buf) { 1189 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1190 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1191 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1192 l_ptr->retransm_queue_head = mod(++r_q_head); 1193 l_ptr->retransm_queue_size = --r_q_size; 1194 l_ptr->stats.retransmitted++; 1195 return 0; 1196 } 1197 1198 /* Send deferred protocol message, if any: */ 1199 buf = l_ptr->proto_msg_queue; 1200 if (buf) { 1201 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1202 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1203 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1204 l_ptr->unacked_window = 0; 1205 kfree_skb(buf); 1206 l_ptr->proto_msg_queue = NULL; 1207 return 0; 1208 } 1209 1210 /* Send one deferred data message, if send window not full: */ 1211 buf = l_ptr->next_out; 1212 if (buf) { 1213 struct tipc_msg *msg = buf_msg(buf); 1214 u32 next = msg_seqno(msg); 1215 u32 first = buf_seqno(l_ptr->first_out); 1216 1217 if (mod(next - first) < l_ptr->queue_limit[0]) { 1218 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1219 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1220 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1221 if (msg_user(msg) == MSG_BUNDLER) 1222 msg_set_type(msg, CLOSED_MSG); 1223 l_ptr->next_out = buf->next; 1224 return 0; 1225 } 1226 } 1227 return 1; 1228 } 1229 1230 /* 1231 * push_queue(): push out the unsent messages of a link where 1232 * congestion has abated. Node is locked 1233 */ 1234 void tipc_link_push_queue(struct tipc_link *l_ptr) 1235 { 1236 u32 res; 1237 1238 do { 1239 res = tipc_link_push_packet(l_ptr); 1240 } while (!res); 1241 } 1242 1243 static void link_reset_all(unsigned long addr) 1244 { 1245 struct tipc_node *n_ptr; 1246 char addr_string[16]; 1247 u32 i; 1248 1249 read_lock_bh(&tipc_net_lock); 1250 n_ptr = tipc_node_find((u32)addr); 1251 if (!n_ptr) { 1252 read_unlock_bh(&tipc_net_lock); 1253 return; /* node no longer exists */ 1254 } 1255 1256 tipc_node_lock(n_ptr); 1257 1258 pr_warn("Resetting all links to %s\n", 1259 tipc_addr_string_fill(addr_string, n_ptr->addr)); 1260 1261 for (i = 0; i < MAX_BEARERS; i++) { 1262 if (n_ptr->links[i]) { 1263 link_print(n_ptr->links[i], "Resetting link\n"); 1264 tipc_link_reset(n_ptr->links[i]); 1265 } 1266 } 1267 1268 tipc_node_unlock(n_ptr); 1269 read_unlock_bh(&tipc_net_lock); 1270 } 1271 1272 static void link_retransmit_failure(struct tipc_link *l_ptr, 1273 struct sk_buff *buf) 1274 { 1275 struct tipc_msg *msg = buf_msg(buf); 1276 1277 pr_warn("Retransmission failure on link <%s>\n", l_ptr->name); 1278 1279 if (l_ptr->addr) { 1280 /* Handle failure on standard link */ 1281 link_print(l_ptr, "Resetting link\n"); 1282 tipc_link_reset(l_ptr); 1283 1284 } else { 1285 /* Handle failure on broadcast link */ 1286 struct tipc_node *n_ptr; 1287 char addr_string[16]; 1288 1289 pr_info("Msg seq number: %u, ", msg_seqno(msg)); 1290 pr_cont("Outstanding acks: %lu\n", 1291 (unsigned long) TIPC_SKB_CB(buf)->handle); 1292 1293 n_ptr = tipc_bclink_retransmit_to(); 1294 tipc_node_lock(n_ptr); 1295 1296 tipc_addr_string_fill(addr_string, n_ptr->addr); 1297 pr_info("Broadcast link info for %s\n", addr_string); 1298 pr_info("Reception permitted: %d, Acked: %u\n", 1299 n_ptr->bclink.recv_permitted, 1300 n_ptr->bclink.acked); 1301 pr_info("Last in: %u, Oos state: %u, Last sent: %u\n", 1302 n_ptr->bclink.last_in, 1303 n_ptr->bclink.oos_state, 1304 n_ptr->bclink.last_sent); 1305 1306 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr); 1307 1308 tipc_node_unlock(n_ptr); 1309 1310 l_ptr->stale_count = 0; 1311 } 1312 } 1313 1314 void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf, 1315 u32 retransmits) 1316 { 1317 struct tipc_msg *msg; 1318 1319 if (!buf) 1320 return; 1321 1322 msg = buf_msg(buf); 1323 1324 /* Detect repeated retransmit failures */ 1325 if (l_ptr->last_retransmitted == msg_seqno(msg)) { 1326 if (++l_ptr->stale_count > 100) { 1327 link_retransmit_failure(l_ptr, buf); 1328 return; 1329 } 1330 } else { 1331 l_ptr->last_retransmitted = msg_seqno(msg); 1332 l_ptr->stale_count = 1; 1333 } 1334 1335 while (retransmits && (buf != l_ptr->next_out) && buf) { 1336 msg = buf_msg(buf); 1337 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1338 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1339 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1340 buf = buf->next; 1341 retransmits--; 1342 l_ptr->stats.retransmitted++; 1343 } 1344 1345 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0; 1346 } 1347 1348 /** 1349 * link_insert_deferred_queue - insert deferred messages back into receive chain 1350 */ 1351 static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr, 1352 struct sk_buff *buf) 1353 { 1354 u32 seq_no; 1355 1356 if (l_ptr->oldest_deferred_in == NULL) 1357 return buf; 1358 1359 seq_no = buf_seqno(l_ptr->oldest_deferred_in); 1360 if (seq_no == mod(l_ptr->next_in_no)) { 1361 l_ptr->newest_deferred_in->next = buf; 1362 buf = l_ptr->oldest_deferred_in; 1363 l_ptr->oldest_deferred_in = NULL; 1364 l_ptr->deferred_inqueue_sz = 0; 1365 } 1366 return buf; 1367 } 1368 1369 /** 1370 * link_recv_buf_validate - validate basic format of received message 1371 * 1372 * This routine ensures a TIPC message has an acceptable header, and at least 1373 * as much data as the header indicates it should. The routine also ensures 1374 * that the entire message header is stored in the main fragment of the message 1375 * buffer, to simplify future access to message header fields. 1376 * 1377 * Note: Having extra info present in the message header or data areas is OK. 1378 * TIPC will ignore the excess, under the assumption that it is optional info 1379 * introduced by a later release of the protocol. 1380 */ 1381 static int link_recv_buf_validate(struct sk_buff *buf) 1382 { 1383 static u32 min_data_hdr_size[8] = { 1384 SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE, 1385 MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE 1386 }; 1387 1388 struct tipc_msg *msg; 1389 u32 tipc_hdr[2]; 1390 u32 size; 1391 u32 hdr_size; 1392 u32 min_hdr_size; 1393 1394 /* If this packet comes from the defer queue, the skb has already 1395 * been validated 1396 */ 1397 if (unlikely(TIPC_SKB_CB(buf)->deferred)) 1398 return 1; 1399 1400 if (unlikely(buf->len < MIN_H_SIZE)) 1401 return 0; 1402 1403 msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr); 1404 if (msg == NULL) 1405 return 0; 1406 1407 if (unlikely(msg_version(msg) != TIPC_VERSION)) 1408 return 0; 1409 1410 size = msg_size(msg); 1411 hdr_size = msg_hdr_sz(msg); 1412 min_hdr_size = msg_isdata(msg) ? 1413 min_data_hdr_size[msg_type(msg)] : INT_H_SIZE; 1414 1415 if (unlikely((hdr_size < min_hdr_size) || 1416 (size < hdr_size) || 1417 (buf->len < size) || 1418 (size - hdr_size > TIPC_MAX_USER_MSG_SIZE))) 1419 return 0; 1420 1421 return pskb_may_pull(buf, hdr_size); 1422 } 1423 1424 /** 1425 * tipc_rcv - process TIPC packets/messages arriving from off-node 1426 * @head: pointer to message buffer chain 1427 * @tb_ptr: pointer to bearer message arrived on 1428 * 1429 * Invoked with no locks held. Bearer pointer must point to a valid bearer 1430 * structure (i.e. cannot be NULL), but bearer can be inactive. 1431 */ 1432 void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr) 1433 { 1434 read_lock_bh(&tipc_net_lock); 1435 while (head) { 1436 struct tipc_node *n_ptr; 1437 struct tipc_link *l_ptr; 1438 struct sk_buff *crs; 1439 struct sk_buff *buf = head; 1440 struct tipc_msg *msg; 1441 u32 seq_no; 1442 u32 ackd; 1443 u32 released = 0; 1444 int type; 1445 1446 head = head->next; 1447 buf->next = NULL; 1448 1449 /* Ensure bearer is still enabled */ 1450 if (unlikely(!b_ptr->active)) 1451 goto discard; 1452 1453 /* Ensure message is well-formed */ 1454 if (unlikely(!link_recv_buf_validate(buf))) 1455 goto discard; 1456 1457 /* Ensure message data is a single contiguous unit */ 1458 if (unlikely(skb_linearize(buf))) 1459 goto discard; 1460 1461 /* Handle arrival of a non-unicast link message */ 1462 msg = buf_msg(buf); 1463 1464 if (unlikely(msg_non_seq(msg))) { 1465 if (msg_user(msg) == LINK_CONFIG) 1466 tipc_disc_recv_msg(buf, b_ptr); 1467 else 1468 tipc_bclink_recv_pkt(buf); 1469 continue; 1470 } 1471 1472 /* Discard unicast link messages destined for another node */ 1473 if (unlikely(!msg_short(msg) && 1474 (msg_destnode(msg) != tipc_own_addr))) 1475 goto discard; 1476 1477 /* Locate neighboring node that sent message */ 1478 n_ptr = tipc_node_find(msg_prevnode(msg)); 1479 if (unlikely(!n_ptr)) 1480 goto discard; 1481 tipc_node_lock(n_ptr); 1482 1483 /* Locate unicast link endpoint that should handle message */ 1484 l_ptr = n_ptr->links[b_ptr->identity]; 1485 if (unlikely(!l_ptr)) 1486 goto unlock_discard; 1487 1488 /* Verify that communication with node is currently allowed */ 1489 if ((n_ptr->block_setup & WAIT_PEER_DOWN) && 1490 msg_user(msg) == LINK_PROTOCOL && 1491 (msg_type(msg) == RESET_MSG || 1492 msg_type(msg) == ACTIVATE_MSG) && 1493 !msg_redundant_link(msg)) 1494 n_ptr->block_setup &= ~WAIT_PEER_DOWN; 1495 1496 if (n_ptr->block_setup) 1497 goto unlock_discard; 1498 1499 /* Validate message sequence number info */ 1500 seq_no = msg_seqno(msg); 1501 ackd = msg_ack(msg); 1502 1503 /* Release acked messages */ 1504 if (n_ptr->bclink.recv_permitted) 1505 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); 1506 1507 crs = l_ptr->first_out; 1508 while ((crs != l_ptr->next_out) && 1509 less_eq(buf_seqno(crs), ackd)) { 1510 struct sk_buff *next = crs->next; 1511 1512 kfree_skb(crs); 1513 crs = next; 1514 released++; 1515 } 1516 if (released) { 1517 l_ptr->first_out = crs; 1518 l_ptr->out_queue_size -= released; 1519 } 1520 1521 /* Try sending any messages link endpoint has pending */ 1522 if (unlikely(l_ptr->next_out)) 1523 tipc_link_push_queue(l_ptr); 1524 if (unlikely(!list_empty(&l_ptr->waiting_ports))) 1525 tipc_link_wakeup_ports(l_ptr, 0); 1526 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) { 1527 l_ptr->stats.sent_acks++; 1528 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1529 } 1530 1531 /* Now (finally!) process the incoming message */ 1532 protocol_check: 1533 if (unlikely(!link_working_working(l_ptr))) { 1534 if (msg_user(msg) == LINK_PROTOCOL) { 1535 link_recv_proto_msg(l_ptr, buf); 1536 head = link_insert_deferred_queue(l_ptr, head); 1537 tipc_node_unlock(n_ptr); 1538 continue; 1539 } 1540 1541 /* Traffic message. Conditionally activate link */ 1542 link_state_event(l_ptr, TRAFFIC_MSG_EVT); 1543 1544 if (link_working_working(l_ptr)) { 1545 /* Re-insert buffer in front of queue */ 1546 buf->next = head; 1547 head = buf; 1548 tipc_node_unlock(n_ptr); 1549 continue; 1550 } 1551 goto unlock_discard; 1552 } 1553 1554 /* Link is now in state WORKING_WORKING */ 1555 if (unlikely(seq_no != mod(l_ptr->next_in_no))) { 1556 link_handle_out_of_seq_msg(l_ptr, buf); 1557 head = link_insert_deferred_queue(l_ptr, head); 1558 tipc_node_unlock(n_ptr); 1559 continue; 1560 } 1561 l_ptr->next_in_no++; 1562 if (unlikely(l_ptr->oldest_deferred_in)) 1563 head = link_insert_deferred_queue(l_ptr, head); 1564 deliver: 1565 if (likely(msg_isdata(msg))) { 1566 tipc_node_unlock(n_ptr); 1567 tipc_port_recv_msg(buf); 1568 continue; 1569 } 1570 switch (msg_user(msg)) { 1571 int ret; 1572 case MSG_BUNDLER: 1573 l_ptr->stats.recv_bundles++; 1574 l_ptr->stats.recv_bundled += msg_msgcnt(msg); 1575 tipc_node_unlock(n_ptr); 1576 tipc_link_recv_bundle(buf); 1577 continue; 1578 case NAME_DISTRIBUTOR: 1579 n_ptr->bclink.recv_permitted = true; 1580 tipc_node_unlock(n_ptr); 1581 tipc_named_recv(buf); 1582 continue; 1583 case BCAST_PROTOCOL: 1584 tipc_link_recv_sync(n_ptr, buf); 1585 tipc_node_unlock(n_ptr); 1586 continue; 1587 case CONN_MANAGER: 1588 tipc_node_unlock(n_ptr); 1589 tipc_port_recv_proto_msg(buf); 1590 continue; 1591 case MSG_FRAGMENTER: 1592 l_ptr->stats.recv_fragments++; 1593 ret = tipc_link_recv_fragment(&l_ptr->reasm_head, 1594 &l_ptr->reasm_tail, 1595 &buf); 1596 if (ret == LINK_REASM_COMPLETE) { 1597 l_ptr->stats.recv_fragmented++; 1598 msg = buf_msg(buf); 1599 goto deliver; 1600 } 1601 if (ret == LINK_REASM_ERROR) 1602 tipc_link_reset(l_ptr); 1603 tipc_node_unlock(n_ptr); 1604 continue; 1605 case CHANGEOVER_PROTOCOL: 1606 type = msg_type(msg); 1607 if (tipc_link_tunnel_rcv(&l_ptr, &buf)) { 1608 msg = buf_msg(buf); 1609 seq_no = msg_seqno(msg); 1610 if (type == ORIGINAL_MSG) 1611 goto deliver; 1612 goto protocol_check; 1613 } 1614 break; 1615 default: 1616 kfree_skb(buf); 1617 buf = NULL; 1618 break; 1619 } 1620 tipc_node_unlock(n_ptr); 1621 tipc_net_route_msg(buf); 1622 continue; 1623 unlock_discard: 1624 1625 tipc_node_unlock(n_ptr); 1626 discard: 1627 kfree_skb(buf); 1628 } 1629 read_unlock_bh(&tipc_net_lock); 1630 } 1631 1632 /** 1633 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue 1634 * 1635 * Returns increase in queue length (i.e. 0 or 1) 1636 */ 1637 u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail, 1638 struct sk_buff *buf) 1639 { 1640 struct sk_buff *queue_buf; 1641 struct sk_buff **prev; 1642 u32 seq_no = buf_seqno(buf); 1643 1644 buf->next = NULL; 1645 1646 /* Empty queue ? */ 1647 if (*head == NULL) { 1648 *head = *tail = buf; 1649 return 1; 1650 } 1651 1652 /* Last ? */ 1653 if (less(buf_seqno(*tail), seq_no)) { 1654 (*tail)->next = buf; 1655 *tail = buf; 1656 return 1; 1657 } 1658 1659 /* Locate insertion point in queue, then insert; discard if duplicate */ 1660 prev = head; 1661 queue_buf = *head; 1662 for (;;) { 1663 u32 curr_seqno = buf_seqno(queue_buf); 1664 1665 if (seq_no == curr_seqno) { 1666 kfree_skb(buf); 1667 return 0; 1668 } 1669 1670 if (less(seq_no, curr_seqno)) 1671 break; 1672 1673 prev = &queue_buf->next; 1674 queue_buf = queue_buf->next; 1675 } 1676 1677 buf->next = queue_buf; 1678 *prev = buf; 1679 return 1; 1680 } 1681 1682 /* 1683 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet 1684 */ 1685 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, 1686 struct sk_buff *buf) 1687 { 1688 u32 seq_no = buf_seqno(buf); 1689 1690 if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) { 1691 link_recv_proto_msg(l_ptr, buf); 1692 return; 1693 } 1694 1695 /* Record OOS packet arrival (force mismatch on next timeout) */ 1696 l_ptr->checkpoint--; 1697 1698 /* 1699 * Discard packet if a duplicate; otherwise add it to deferred queue 1700 * and notify peer of gap as per protocol specification 1701 */ 1702 if (less(seq_no, mod(l_ptr->next_in_no))) { 1703 l_ptr->stats.duplicates++; 1704 kfree_skb(buf); 1705 return; 1706 } 1707 1708 if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in, 1709 &l_ptr->newest_deferred_in, buf)) { 1710 l_ptr->deferred_inqueue_sz++; 1711 l_ptr->stats.deferred_recv++; 1712 TIPC_SKB_CB(buf)->deferred = true; 1713 if ((l_ptr->deferred_inqueue_sz % 16) == 1) 1714 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1715 } else 1716 l_ptr->stats.duplicates++; 1717 } 1718 1719 /* 1720 * Send protocol message to the other endpoint. 1721 */ 1722 void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, 1723 int probe_msg, u32 gap, u32 tolerance, 1724 u32 priority, u32 ack_mtu) 1725 { 1726 struct sk_buff *buf = NULL; 1727 struct tipc_msg *msg = l_ptr->pmsg; 1728 u32 msg_size = sizeof(l_ptr->proto_msg); 1729 int r_flag; 1730 1731 /* Discard any previous message that was deferred due to congestion */ 1732 if (l_ptr->proto_msg_queue) { 1733 kfree_skb(l_ptr->proto_msg_queue); 1734 l_ptr->proto_msg_queue = NULL; 1735 } 1736 1737 /* Don't send protocol message during link changeover */ 1738 if (l_ptr->exp_msg_count) 1739 return; 1740 1741 /* Abort non-RESET send if communication with node is prohibited */ 1742 if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG)) 1743 return; 1744 1745 /* Create protocol message with "out-of-sequence" sequence number */ 1746 msg_set_type(msg, msg_typ); 1747 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane); 1748 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1749 msg_set_last_bcast(msg, tipc_bclink_get_last_sent()); 1750 1751 if (msg_typ == STATE_MSG) { 1752 u32 next_sent = mod(l_ptr->next_out_no); 1753 1754 if (!tipc_link_is_up(l_ptr)) 1755 return; 1756 if (l_ptr->next_out) 1757 next_sent = buf_seqno(l_ptr->next_out); 1758 msg_set_next_sent(msg, next_sent); 1759 if (l_ptr->oldest_deferred_in) { 1760 u32 rec = buf_seqno(l_ptr->oldest_deferred_in); 1761 gap = mod(rec - mod(l_ptr->next_in_no)); 1762 } 1763 msg_set_seq_gap(msg, gap); 1764 if (gap) 1765 l_ptr->stats.sent_nacks++; 1766 msg_set_link_tolerance(msg, tolerance); 1767 msg_set_linkprio(msg, priority); 1768 msg_set_max_pkt(msg, ack_mtu); 1769 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1770 msg_set_probe(msg, probe_msg != 0); 1771 if (probe_msg) { 1772 u32 mtu = l_ptr->max_pkt; 1773 1774 if ((mtu < l_ptr->max_pkt_target) && 1775 link_working_working(l_ptr) && 1776 l_ptr->fsm_msg_cnt) { 1777 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3; 1778 if (l_ptr->max_pkt_probes == 10) { 1779 l_ptr->max_pkt_target = (msg_size - 4); 1780 l_ptr->max_pkt_probes = 0; 1781 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3; 1782 } 1783 l_ptr->max_pkt_probes++; 1784 } 1785 1786 l_ptr->stats.sent_probes++; 1787 } 1788 l_ptr->stats.sent_states++; 1789 } else { /* RESET_MSG or ACTIVATE_MSG */ 1790 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1)); 1791 msg_set_seq_gap(msg, 0); 1792 msg_set_next_sent(msg, 1); 1793 msg_set_probe(msg, 0); 1794 msg_set_link_tolerance(msg, l_ptr->tolerance); 1795 msg_set_linkprio(msg, l_ptr->priority); 1796 msg_set_max_pkt(msg, l_ptr->max_pkt_target); 1797 } 1798 1799 r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr)); 1800 msg_set_redundant_link(msg, r_flag); 1801 msg_set_linkprio(msg, l_ptr->priority); 1802 msg_set_size(msg, msg_size); 1803 1804 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2))); 1805 1806 buf = tipc_buf_acquire(msg_size); 1807 if (!buf) 1808 return; 1809 1810 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); 1811 buf->priority = TC_PRIO_CONTROL; 1812 1813 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1814 l_ptr->unacked_window = 0; 1815 kfree_skb(buf); 1816 } 1817 1818 /* 1819 * Receive protocol message : 1820 * Note that network plane id propagates through the network, and may 1821 * change at any time. The node with lowest address rules 1822 */ 1823 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) 1824 { 1825 u32 rec_gap = 0; 1826 u32 max_pkt_info; 1827 u32 max_pkt_ack; 1828 u32 msg_tol; 1829 struct tipc_msg *msg = buf_msg(buf); 1830 1831 /* Discard protocol message during link changeover */ 1832 if (l_ptr->exp_msg_count) 1833 goto exit; 1834 1835 /* record unnumbered packet arrival (force mismatch on next timeout) */ 1836 l_ptr->checkpoint--; 1837 1838 if (l_ptr->b_ptr->net_plane != msg_net_plane(msg)) 1839 if (tipc_own_addr > msg_prevnode(msg)) 1840 l_ptr->b_ptr->net_plane = msg_net_plane(msg); 1841 1842 switch (msg_type(msg)) { 1843 1844 case RESET_MSG: 1845 if (!link_working_unknown(l_ptr) && 1846 (l_ptr->peer_session != INVALID_SESSION)) { 1847 if (less_eq(msg_session(msg), l_ptr->peer_session)) 1848 break; /* duplicate or old reset: ignore */ 1849 } 1850 1851 if (!msg_redundant_link(msg) && (link_working_working(l_ptr) || 1852 link_working_unknown(l_ptr))) { 1853 /* 1854 * peer has lost contact -- don't allow peer's links 1855 * to reactivate before we recognize loss & clean up 1856 */ 1857 l_ptr->owner->block_setup = WAIT_NODE_DOWN; 1858 } 1859 1860 link_state_event(l_ptr, RESET_MSG); 1861 1862 /* fall thru' */ 1863 case ACTIVATE_MSG: 1864 /* Update link settings according other endpoint's values */ 1865 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg)); 1866 1867 msg_tol = msg_link_tolerance(msg); 1868 if (msg_tol > l_ptr->tolerance) 1869 link_set_supervision_props(l_ptr, msg_tol); 1870 1871 if (msg_linkprio(msg) > l_ptr->priority) 1872 l_ptr->priority = msg_linkprio(msg); 1873 1874 max_pkt_info = msg_max_pkt(msg); 1875 if (max_pkt_info) { 1876 if (max_pkt_info < l_ptr->max_pkt_target) 1877 l_ptr->max_pkt_target = max_pkt_info; 1878 if (l_ptr->max_pkt > l_ptr->max_pkt_target) 1879 l_ptr->max_pkt = l_ptr->max_pkt_target; 1880 } else { 1881 l_ptr->max_pkt = l_ptr->max_pkt_target; 1882 } 1883 1884 /* Synchronize broadcast link info, if not done previously */ 1885 if (!tipc_node_is_up(l_ptr->owner)) { 1886 l_ptr->owner->bclink.last_sent = 1887 l_ptr->owner->bclink.last_in = 1888 msg_last_bcast(msg); 1889 l_ptr->owner->bclink.oos_state = 0; 1890 } 1891 1892 l_ptr->peer_session = msg_session(msg); 1893 l_ptr->peer_bearer_id = msg_bearer_id(msg); 1894 1895 if (msg_type(msg) == ACTIVATE_MSG) 1896 link_state_event(l_ptr, ACTIVATE_MSG); 1897 break; 1898 case STATE_MSG: 1899 1900 msg_tol = msg_link_tolerance(msg); 1901 if (msg_tol) 1902 link_set_supervision_props(l_ptr, msg_tol); 1903 1904 if (msg_linkprio(msg) && 1905 (msg_linkprio(msg) != l_ptr->priority)) { 1906 pr_warn("%s<%s>, priority change %u->%u\n", 1907 link_rst_msg, l_ptr->name, l_ptr->priority, 1908 msg_linkprio(msg)); 1909 l_ptr->priority = msg_linkprio(msg); 1910 tipc_link_reset(l_ptr); /* Enforce change to take effect */ 1911 break; 1912 } 1913 link_state_event(l_ptr, TRAFFIC_MSG_EVT); 1914 l_ptr->stats.recv_states++; 1915 if (link_reset_unknown(l_ptr)) 1916 break; 1917 1918 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) { 1919 rec_gap = mod(msg_next_sent(msg) - 1920 mod(l_ptr->next_in_no)); 1921 } 1922 1923 max_pkt_ack = msg_max_pkt(msg); 1924 if (max_pkt_ack > l_ptr->max_pkt) { 1925 l_ptr->max_pkt = max_pkt_ack; 1926 l_ptr->max_pkt_probes = 0; 1927 } 1928 1929 max_pkt_ack = 0; 1930 if (msg_probe(msg)) { 1931 l_ptr->stats.recv_probes++; 1932 if (msg_size(msg) > sizeof(l_ptr->proto_msg)) 1933 max_pkt_ack = msg_size(msg); 1934 } 1935 1936 /* Protocol message before retransmits, reduce loss risk */ 1937 if (l_ptr->owner->bclink.recv_permitted) 1938 tipc_bclink_update_link_state(l_ptr->owner, 1939 msg_last_bcast(msg)); 1940 1941 if (rec_gap || (msg_probe(msg))) { 1942 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1943 0, rec_gap, 0, 0, max_pkt_ack); 1944 } 1945 if (msg_seq_gap(msg)) { 1946 l_ptr->stats.recv_nacks++; 1947 tipc_link_retransmit(l_ptr, l_ptr->first_out, 1948 msg_seq_gap(msg)); 1949 } 1950 break; 1951 } 1952 exit: 1953 kfree_skb(buf); 1954 } 1955 1956 1957 /* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to 1958 * a different bearer. Owner node is locked. 1959 */ 1960 static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr, 1961 struct tipc_msg *tunnel_hdr, 1962 struct tipc_msg *msg, 1963 u32 selector) 1964 { 1965 struct tipc_link *tunnel; 1966 struct sk_buff *buf; 1967 u32 length = msg_size(msg); 1968 1969 tunnel = l_ptr->owner->active_links[selector & 1]; 1970 if (!tipc_link_is_up(tunnel)) { 1971 pr_warn("%stunnel link no longer available\n", link_co_err); 1972 return; 1973 } 1974 msg_set_size(tunnel_hdr, length + INT_H_SIZE); 1975 buf = tipc_buf_acquire(length + INT_H_SIZE); 1976 if (!buf) { 1977 pr_warn("%sunable to send tunnel msg\n", link_co_err); 1978 return; 1979 } 1980 skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE); 1981 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length); 1982 tipc_link_send_buf(tunnel, buf); 1983 } 1984 1985 1986 /* tipc_link_failover_send_queue(): A link has gone down, but a second 1987 * link is still active. We can do failover. Tunnel the failing link's 1988 * whole send queue via the remaining link. This way, we don't lose 1989 * any packets, and sequence order is preserved for subsequent traffic 1990 * sent over the remaining link. Owner node is locked. 1991 */ 1992 void tipc_link_failover_send_queue(struct tipc_link *l_ptr) 1993 { 1994 u32 msgcount = l_ptr->out_queue_size; 1995 struct sk_buff *crs = l_ptr->first_out; 1996 struct tipc_link *tunnel = l_ptr->owner->active_links[0]; 1997 struct tipc_msg tunnel_hdr; 1998 int split_bundles; 1999 2000 if (!tunnel) 2001 return; 2002 2003 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, 2004 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr); 2005 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 2006 msg_set_msgcnt(&tunnel_hdr, msgcount); 2007 2008 if (!l_ptr->first_out) { 2009 struct sk_buff *buf; 2010 2011 buf = tipc_buf_acquire(INT_H_SIZE); 2012 if (buf) { 2013 skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE); 2014 msg_set_size(&tunnel_hdr, INT_H_SIZE); 2015 tipc_link_send_buf(tunnel, buf); 2016 } else { 2017 pr_warn("%sunable to send changeover msg\n", 2018 link_co_err); 2019 } 2020 return; 2021 } 2022 2023 split_bundles = (l_ptr->owner->active_links[0] != 2024 l_ptr->owner->active_links[1]); 2025 2026 while (crs) { 2027 struct tipc_msg *msg = buf_msg(crs); 2028 2029 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { 2030 struct tipc_msg *m = msg_get_wrapped(msg); 2031 unchar *pos = (unchar *)m; 2032 2033 msgcount = msg_msgcnt(msg); 2034 while (msgcount--) { 2035 msg_set_seqno(m, msg_seqno(msg)); 2036 tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m, 2037 msg_link_selector(m)); 2038 pos += align(msg_size(m)); 2039 m = (struct tipc_msg *)pos; 2040 } 2041 } else { 2042 tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg, 2043 msg_link_selector(msg)); 2044 } 2045 crs = crs->next; 2046 } 2047 } 2048 2049 /* tipc_link_dup_send_queue(): A second link has become active. Tunnel a 2050 * duplicate of the first link's send queue via the new link. This way, we 2051 * are guaranteed that currently queued packets from a socket are delivered 2052 * before future traffic from the same socket, even if this is using the 2053 * new link. The last arriving copy of each duplicate packet is dropped at 2054 * the receiving end by the regular protocol check, so packet cardinality 2055 * and sequence order is preserved per sender/receiver socket pair. 2056 * Owner node is locked. 2057 */ 2058 void tipc_link_dup_send_queue(struct tipc_link *l_ptr, 2059 struct tipc_link *tunnel) 2060 { 2061 struct sk_buff *iter; 2062 struct tipc_msg tunnel_hdr; 2063 2064 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, 2065 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr); 2066 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size); 2067 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 2068 iter = l_ptr->first_out; 2069 while (iter) { 2070 struct sk_buff *outbuf; 2071 struct tipc_msg *msg = buf_msg(iter); 2072 u32 length = msg_size(msg); 2073 2074 if (msg_user(msg) == MSG_BUNDLER) 2075 msg_set_type(msg, CLOSED_MSG); 2076 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */ 2077 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 2078 msg_set_size(&tunnel_hdr, length + INT_H_SIZE); 2079 outbuf = tipc_buf_acquire(length + INT_H_SIZE); 2080 if (outbuf == NULL) { 2081 pr_warn("%sunable to send duplicate msg\n", 2082 link_co_err); 2083 return; 2084 } 2085 skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE); 2086 skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data, 2087 length); 2088 tipc_link_send_buf(tunnel, outbuf); 2089 if (!tipc_link_is_up(l_ptr)) 2090 return; 2091 iter = iter->next; 2092 } 2093 } 2094 2095 /** 2096 * buf_extract - extracts embedded TIPC message from another message 2097 * @skb: encapsulating message buffer 2098 * @from_pos: offset to extract from 2099 * 2100 * Returns a new message buffer containing an embedded message. The 2101 * encapsulating message itself is left unchanged. 2102 */ 2103 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) 2104 { 2105 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos); 2106 u32 size = msg_size(msg); 2107 struct sk_buff *eb; 2108 2109 eb = tipc_buf_acquire(size); 2110 if (eb) 2111 skb_copy_to_linear_data(eb, msg, size); 2112 return eb; 2113 } 2114 2115 /* tipc_link_tunnel_rcv(): Receive a tunneled packet, sent 2116 * via other link as result of a failover (ORIGINAL_MSG) or 2117 * a new active link (DUPLICATE_MSG). Failover packets are 2118 * returned to the active link for delivery upwards. 2119 * Owner node is locked. 2120 */ 2121 static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr, 2122 struct sk_buff **buf) 2123 { 2124 struct sk_buff *tunnel_buf = *buf; 2125 struct tipc_link *dest_link; 2126 struct tipc_msg *msg; 2127 struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf); 2128 u32 msg_typ = msg_type(tunnel_msg); 2129 u32 msg_count = msg_msgcnt(tunnel_msg); 2130 u32 bearer_id = msg_bearer_id(tunnel_msg); 2131 2132 if (bearer_id >= MAX_BEARERS) 2133 goto exit; 2134 dest_link = (*l_ptr)->owner->links[bearer_id]; 2135 if (!dest_link) 2136 goto exit; 2137 if (dest_link == *l_ptr) { 2138 pr_err("Unexpected changeover message on link <%s>\n", 2139 (*l_ptr)->name); 2140 goto exit; 2141 } 2142 *l_ptr = dest_link; 2143 msg = msg_get_wrapped(tunnel_msg); 2144 2145 if (msg_typ == DUPLICATE_MSG) { 2146 if (less(msg_seqno(msg), mod(dest_link->next_in_no))) 2147 goto exit; 2148 *buf = buf_extract(tunnel_buf, INT_H_SIZE); 2149 if (*buf == NULL) { 2150 pr_warn("%sduplicate msg dropped\n", link_co_err); 2151 goto exit; 2152 } 2153 kfree_skb(tunnel_buf); 2154 return 1; 2155 } 2156 2157 /* First original message ?: */ 2158 if (tipc_link_is_up(dest_link)) { 2159 pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg, 2160 dest_link->name); 2161 tipc_link_reset(dest_link); 2162 dest_link->exp_msg_count = msg_count; 2163 if (!msg_count) 2164 goto exit; 2165 } else if (dest_link->exp_msg_count == START_CHANGEOVER) { 2166 dest_link->exp_msg_count = msg_count; 2167 if (!msg_count) 2168 goto exit; 2169 } 2170 2171 /* Receive original message */ 2172 if (dest_link->exp_msg_count == 0) { 2173 pr_warn("%sgot too many tunnelled messages\n", link_co_err); 2174 goto exit; 2175 } 2176 dest_link->exp_msg_count--; 2177 if (less(msg_seqno(msg), dest_link->reset_checkpoint)) { 2178 goto exit; 2179 } else { 2180 *buf = buf_extract(tunnel_buf, INT_H_SIZE); 2181 if (*buf != NULL) { 2182 kfree_skb(tunnel_buf); 2183 return 1; 2184 } else { 2185 pr_warn("%soriginal msg dropped\n", link_co_err); 2186 } 2187 } 2188 exit: 2189 *buf = NULL; 2190 kfree_skb(tunnel_buf); 2191 return 0; 2192 } 2193 2194 /* 2195 * Bundler functionality: 2196 */ 2197 void tipc_link_recv_bundle(struct sk_buff *buf) 2198 { 2199 u32 msgcount = msg_msgcnt(buf_msg(buf)); 2200 u32 pos = INT_H_SIZE; 2201 struct sk_buff *obuf; 2202 2203 while (msgcount--) { 2204 obuf = buf_extract(buf, pos); 2205 if (obuf == NULL) { 2206 pr_warn("Link unable to unbundle message(s)\n"); 2207 break; 2208 } 2209 pos += align(msg_size(buf_msg(obuf))); 2210 tipc_net_route_msg(obuf); 2211 } 2212 kfree_skb(buf); 2213 } 2214 2215 /* 2216 * Fragmentation/defragmentation: 2217 */ 2218 2219 /* 2220 * link_send_long_buf: Entry for buffers needing fragmentation. 2221 * The buffer is complete, inclusive total message length. 2222 * Returns user data length. 2223 */ 2224 static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf) 2225 { 2226 struct sk_buff *buf_chain = NULL; 2227 struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain; 2228 struct tipc_msg *inmsg = buf_msg(buf); 2229 struct tipc_msg fragm_hdr; 2230 u32 insize = msg_size(inmsg); 2231 u32 dsz = msg_data_sz(inmsg); 2232 unchar *crs = buf->data; 2233 u32 rest = insize; 2234 u32 pack_sz = l_ptr->max_pkt; 2235 u32 fragm_sz = pack_sz - INT_H_SIZE; 2236 u32 fragm_no = 0; 2237 u32 destaddr; 2238 2239 if (msg_short(inmsg)) 2240 destaddr = l_ptr->addr; 2241 else 2242 destaddr = msg_destnode(inmsg); 2243 2244 /* Prepare reusable fragment header: */ 2245 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 2246 INT_H_SIZE, destaddr); 2247 2248 /* Chop up message: */ 2249 while (rest > 0) { 2250 struct sk_buff *fragm; 2251 2252 if (rest <= fragm_sz) { 2253 fragm_sz = rest; 2254 msg_set_type(&fragm_hdr, LAST_FRAGMENT); 2255 } 2256 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE); 2257 if (fragm == NULL) { 2258 kfree_skb(buf); 2259 kfree_skb_list(buf_chain); 2260 return -ENOMEM; 2261 } 2262 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE); 2263 fragm_no++; 2264 msg_set_fragm_no(&fragm_hdr, fragm_no); 2265 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE); 2266 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs, 2267 fragm_sz); 2268 buf_chain_tail->next = fragm; 2269 buf_chain_tail = fragm; 2270 2271 rest -= fragm_sz; 2272 crs += fragm_sz; 2273 msg_set_type(&fragm_hdr, FRAGMENT); 2274 } 2275 kfree_skb(buf); 2276 2277 /* Append chain of fragments to send queue & send them */ 2278 l_ptr->long_msg_seq_no++; 2279 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no); 2280 l_ptr->stats.sent_fragments += fragm_no; 2281 l_ptr->stats.sent_fragmented++; 2282 tipc_link_push_queue(l_ptr); 2283 2284 return dsz; 2285 } 2286 2287 /* 2288 * tipc_link_recv_fragment(): Called with node lock on. Returns 2289 * the reassembled buffer if message is complete. 2290 */ 2291 int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail, 2292 struct sk_buff **fbuf) 2293 { 2294 struct sk_buff *frag = *fbuf; 2295 struct tipc_msg *msg = buf_msg(frag); 2296 u32 fragid = msg_type(msg); 2297 bool headstolen; 2298 int delta; 2299 2300 skb_pull(frag, msg_hdr_sz(msg)); 2301 if (fragid == FIRST_FRAGMENT) { 2302 if (*head || skb_unclone(frag, GFP_ATOMIC)) 2303 goto out_free; 2304 *head = frag; 2305 skb_frag_list_init(*head); 2306 return 0; 2307 } else if (*head && 2308 skb_try_coalesce(*head, frag, &headstolen, &delta)) { 2309 kfree_skb_partial(frag, headstolen); 2310 } else { 2311 if (!*head) 2312 goto out_free; 2313 if (!skb_has_frag_list(*head)) 2314 skb_shinfo(*head)->frag_list = frag; 2315 else 2316 (*tail)->next = frag; 2317 *tail = frag; 2318 (*head)->truesize += frag->truesize; 2319 } 2320 if (fragid == LAST_FRAGMENT) { 2321 *fbuf = *head; 2322 *tail = *head = NULL; 2323 return LINK_REASM_COMPLETE; 2324 } 2325 return 0; 2326 out_free: 2327 pr_warn_ratelimited("Link unable to reassemble fragmented message\n"); 2328 kfree_skb(*fbuf); 2329 return LINK_REASM_ERROR; 2330 } 2331 2332 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) 2333 { 2334 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) 2335 return; 2336 2337 l_ptr->tolerance = tolerance; 2338 l_ptr->continuity_interval = 2339 ((tolerance / 4) > 500) ? 500 : tolerance / 4; 2340 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4); 2341 } 2342 2343 void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) 2344 { 2345 /* Data messages from this node, inclusive FIRST_FRAGM */ 2346 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window; 2347 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4; 2348 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5; 2349 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6; 2350 /* Transiting data messages,inclusive FIRST_FRAGM */ 2351 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300; 2352 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600; 2353 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900; 2354 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200; 2355 l_ptr->queue_limit[CONN_MANAGER] = 1200; 2356 l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500; 2357 l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000; 2358 /* FRAGMENT and LAST_FRAGMENT packets */ 2359 l_ptr->queue_limit[MSG_FRAGMENTER] = 4000; 2360 } 2361 2362 /** 2363 * link_find_link - locate link by name 2364 * @name: ptr to link name string 2365 * @node: ptr to area to be filled with ptr to associated node 2366 * 2367 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted; 2368 * this also prevents link deletion. 2369 * 2370 * Returns pointer to link (or 0 if invalid link name). 2371 */ 2372 static struct tipc_link *link_find_link(const char *name, 2373 struct tipc_node **node) 2374 { 2375 struct tipc_link *l_ptr; 2376 struct tipc_node *n_ptr; 2377 int i; 2378 2379 list_for_each_entry(n_ptr, &tipc_node_list, list) { 2380 for (i = 0; i < MAX_BEARERS; i++) { 2381 l_ptr = n_ptr->links[i]; 2382 if (l_ptr && !strcmp(l_ptr->name, name)) 2383 goto found; 2384 } 2385 } 2386 l_ptr = NULL; 2387 n_ptr = NULL; 2388 found: 2389 *node = n_ptr; 2390 return l_ptr; 2391 } 2392 2393 /** 2394 * link_value_is_valid -- validate proposed link tolerance/priority/window 2395 * 2396 * @cmd: value type (TIPC_CMD_SET_LINK_*) 2397 * @new_value: the new value 2398 * 2399 * Returns 1 if value is within range, 0 if not. 2400 */ 2401 static int link_value_is_valid(u16 cmd, u32 new_value) 2402 { 2403 switch (cmd) { 2404 case TIPC_CMD_SET_LINK_TOL: 2405 return (new_value >= TIPC_MIN_LINK_TOL) && 2406 (new_value <= TIPC_MAX_LINK_TOL); 2407 case TIPC_CMD_SET_LINK_PRI: 2408 return (new_value <= TIPC_MAX_LINK_PRI); 2409 case TIPC_CMD_SET_LINK_WINDOW: 2410 return (new_value >= TIPC_MIN_LINK_WIN) && 2411 (new_value <= TIPC_MAX_LINK_WIN); 2412 } 2413 return 0; 2414 } 2415 2416 /** 2417 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media 2418 * @name: ptr to link, bearer, or media name 2419 * @new_value: new value of link, bearer, or media setting 2420 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*) 2421 * 2422 * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted. 2423 * 2424 * Returns 0 if value updated and negative value on error. 2425 */ 2426 static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd) 2427 { 2428 struct tipc_node *node; 2429 struct tipc_link *l_ptr; 2430 struct tipc_bearer *b_ptr; 2431 struct tipc_media *m_ptr; 2432 int res = 0; 2433 2434 l_ptr = link_find_link(name, &node); 2435 if (l_ptr) { 2436 /* 2437 * acquire node lock for tipc_link_send_proto_msg(). 2438 * see "TIPC locking policy" in net.c. 2439 */ 2440 tipc_node_lock(node); 2441 switch (cmd) { 2442 case TIPC_CMD_SET_LINK_TOL: 2443 link_set_supervision_props(l_ptr, new_value); 2444 tipc_link_send_proto_msg(l_ptr, 2445 STATE_MSG, 0, 0, new_value, 0, 0); 2446 break; 2447 case TIPC_CMD_SET_LINK_PRI: 2448 l_ptr->priority = new_value; 2449 tipc_link_send_proto_msg(l_ptr, 2450 STATE_MSG, 0, 0, 0, new_value, 0); 2451 break; 2452 case TIPC_CMD_SET_LINK_WINDOW: 2453 tipc_link_set_queue_limits(l_ptr, new_value); 2454 break; 2455 default: 2456 res = -EINVAL; 2457 break; 2458 } 2459 tipc_node_unlock(node); 2460 return res; 2461 } 2462 2463 b_ptr = tipc_bearer_find(name); 2464 if (b_ptr) { 2465 switch (cmd) { 2466 case TIPC_CMD_SET_LINK_TOL: 2467 b_ptr->tolerance = new_value; 2468 break; 2469 case TIPC_CMD_SET_LINK_PRI: 2470 b_ptr->priority = new_value; 2471 break; 2472 case TIPC_CMD_SET_LINK_WINDOW: 2473 b_ptr->window = new_value; 2474 break; 2475 default: 2476 res = -EINVAL; 2477 break; 2478 } 2479 return res; 2480 } 2481 2482 m_ptr = tipc_media_find(name); 2483 if (!m_ptr) 2484 return -ENODEV; 2485 switch (cmd) { 2486 case TIPC_CMD_SET_LINK_TOL: 2487 m_ptr->tolerance = new_value; 2488 break; 2489 case TIPC_CMD_SET_LINK_PRI: 2490 m_ptr->priority = new_value; 2491 break; 2492 case TIPC_CMD_SET_LINK_WINDOW: 2493 m_ptr->window = new_value; 2494 break; 2495 default: 2496 res = -EINVAL; 2497 break; 2498 } 2499 return res; 2500 } 2501 2502 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, 2503 u16 cmd) 2504 { 2505 struct tipc_link_config *args; 2506 u32 new_value; 2507 int res; 2508 2509 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG)) 2510 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2511 2512 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area); 2513 new_value = ntohl(args->value); 2514 2515 if (!link_value_is_valid(cmd, new_value)) 2516 return tipc_cfg_reply_error_string( 2517 "cannot change, value invalid"); 2518 2519 if (!strcmp(args->name, tipc_bclink_name)) { 2520 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) && 2521 (tipc_bclink_set_queue_limits(new_value) == 0)) 2522 return tipc_cfg_reply_none(); 2523 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 2524 " (cannot change setting on broadcast link)"); 2525 } 2526 2527 read_lock_bh(&tipc_net_lock); 2528 res = link_cmd_set_value(args->name, new_value, cmd); 2529 read_unlock_bh(&tipc_net_lock); 2530 if (res) 2531 return tipc_cfg_reply_error_string("cannot change link setting"); 2532 2533 return tipc_cfg_reply_none(); 2534 } 2535 2536 /** 2537 * link_reset_statistics - reset link statistics 2538 * @l_ptr: pointer to link 2539 */ 2540 static void link_reset_statistics(struct tipc_link *l_ptr) 2541 { 2542 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats)); 2543 l_ptr->stats.sent_info = l_ptr->next_out_no; 2544 l_ptr->stats.recv_info = l_ptr->next_in_no; 2545 } 2546 2547 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space) 2548 { 2549 char *link_name; 2550 struct tipc_link *l_ptr; 2551 struct tipc_node *node; 2552 2553 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2554 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2555 2556 link_name = (char *)TLV_DATA(req_tlv_area); 2557 if (!strcmp(link_name, tipc_bclink_name)) { 2558 if (tipc_bclink_reset_stats()) 2559 return tipc_cfg_reply_error_string("link not found"); 2560 return tipc_cfg_reply_none(); 2561 } 2562 2563 read_lock_bh(&tipc_net_lock); 2564 l_ptr = link_find_link(link_name, &node); 2565 if (!l_ptr) { 2566 read_unlock_bh(&tipc_net_lock); 2567 return tipc_cfg_reply_error_string("link not found"); 2568 } 2569 2570 tipc_node_lock(node); 2571 link_reset_statistics(l_ptr); 2572 tipc_node_unlock(node); 2573 read_unlock_bh(&tipc_net_lock); 2574 return tipc_cfg_reply_none(); 2575 } 2576 2577 /** 2578 * percent - convert count to a percentage of total (rounding up or down) 2579 */ 2580 static u32 percent(u32 count, u32 total) 2581 { 2582 return (count * 100 + (total / 2)) / total; 2583 } 2584 2585 /** 2586 * tipc_link_stats - print link statistics 2587 * @name: link name 2588 * @buf: print buffer area 2589 * @buf_size: size of print buffer area 2590 * 2591 * Returns length of print buffer data string (or 0 if error) 2592 */ 2593 static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) 2594 { 2595 struct tipc_link *l; 2596 struct tipc_stats *s; 2597 struct tipc_node *node; 2598 char *status; 2599 u32 profile_total = 0; 2600 int ret; 2601 2602 if (!strcmp(name, tipc_bclink_name)) 2603 return tipc_bclink_stats(buf, buf_size); 2604 2605 read_lock_bh(&tipc_net_lock); 2606 l = link_find_link(name, &node); 2607 if (!l) { 2608 read_unlock_bh(&tipc_net_lock); 2609 return 0; 2610 } 2611 tipc_node_lock(node); 2612 s = &l->stats; 2613 2614 if (tipc_link_is_active(l)) 2615 status = "ACTIVE"; 2616 else if (tipc_link_is_up(l)) 2617 status = "STANDBY"; 2618 else 2619 status = "DEFUNCT"; 2620 2621 ret = tipc_snprintf(buf, buf_size, "Link <%s>\n" 2622 " %s MTU:%u Priority:%u Tolerance:%u ms" 2623 " Window:%u packets\n", 2624 l->name, status, l->max_pkt, l->priority, 2625 l->tolerance, l->queue_limit[0]); 2626 2627 ret += tipc_snprintf(buf + ret, buf_size - ret, 2628 " RX packets:%u fragments:%u/%u bundles:%u/%u\n", 2629 l->next_in_no - s->recv_info, s->recv_fragments, 2630 s->recv_fragmented, s->recv_bundles, 2631 s->recv_bundled); 2632 2633 ret += tipc_snprintf(buf + ret, buf_size - ret, 2634 " TX packets:%u fragments:%u/%u bundles:%u/%u\n", 2635 l->next_out_no - s->sent_info, s->sent_fragments, 2636 s->sent_fragmented, s->sent_bundles, 2637 s->sent_bundled); 2638 2639 profile_total = s->msg_length_counts; 2640 if (!profile_total) 2641 profile_total = 1; 2642 2643 ret += tipc_snprintf(buf + ret, buf_size - ret, 2644 " TX profile sample:%u packets average:%u octets\n" 2645 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% " 2646 "-16384:%u%% -32768:%u%% -66000:%u%%\n", 2647 s->msg_length_counts, 2648 s->msg_lengths_total / profile_total, 2649 percent(s->msg_length_profile[0], profile_total), 2650 percent(s->msg_length_profile[1], profile_total), 2651 percent(s->msg_length_profile[2], profile_total), 2652 percent(s->msg_length_profile[3], profile_total), 2653 percent(s->msg_length_profile[4], profile_total), 2654 percent(s->msg_length_profile[5], profile_total), 2655 percent(s->msg_length_profile[6], profile_total)); 2656 2657 ret += tipc_snprintf(buf + ret, buf_size - ret, 2658 " RX states:%u probes:%u naks:%u defs:%u" 2659 " dups:%u\n", s->recv_states, s->recv_probes, 2660 s->recv_nacks, s->deferred_recv, s->duplicates); 2661 2662 ret += tipc_snprintf(buf + ret, buf_size - ret, 2663 " TX states:%u probes:%u naks:%u acks:%u" 2664 " dups:%u\n", s->sent_states, s->sent_probes, 2665 s->sent_nacks, s->sent_acks, s->retransmitted); 2666 2667 ret += tipc_snprintf(buf + ret, buf_size - ret, 2668 " Congestion link:%u Send queue" 2669 " max:%u avg:%u\n", s->link_congs, 2670 s->max_queue_sz, s->queue_sz_counts ? 2671 (s->accu_queue_sz / s->queue_sz_counts) : 0); 2672 2673 tipc_node_unlock(node); 2674 read_unlock_bh(&tipc_net_lock); 2675 return ret; 2676 } 2677 2678 struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space) 2679 { 2680 struct sk_buff *buf; 2681 struct tlv_desc *rep_tlv; 2682 int str_len; 2683 int pb_len; 2684 char *pb; 2685 2686 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2687 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2688 2689 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN)); 2690 if (!buf) 2691 return NULL; 2692 2693 rep_tlv = (struct tlv_desc *)buf->data; 2694 pb = TLV_DATA(rep_tlv); 2695 pb_len = ULTRA_STRING_MAX_LEN; 2696 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area), 2697 pb, pb_len); 2698 if (!str_len) { 2699 kfree_skb(buf); 2700 return tipc_cfg_reply_error_string("link not found"); 2701 } 2702 str_len += 1; /* for "\0" */ 2703 skb_put(buf, TLV_SPACE(str_len)); 2704 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 2705 2706 return buf; 2707 } 2708 2709 /** 2710 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination 2711 * @dest: network address of destination node 2712 * @selector: used to select from set of active links 2713 * 2714 * If no active link can be found, uses default maximum packet size. 2715 */ 2716 u32 tipc_link_get_max_pkt(u32 dest, u32 selector) 2717 { 2718 struct tipc_node *n_ptr; 2719 struct tipc_link *l_ptr; 2720 u32 res = MAX_PKT_DEFAULT; 2721 2722 if (dest == tipc_own_addr) 2723 return MAX_MSG_SIZE; 2724 2725 read_lock_bh(&tipc_net_lock); 2726 n_ptr = tipc_node_find(dest); 2727 if (n_ptr) { 2728 tipc_node_lock(n_ptr); 2729 l_ptr = n_ptr->active_links[selector & 1]; 2730 if (l_ptr) 2731 res = l_ptr->max_pkt; 2732 tipc_node_unlock(n_ptr); 2733 } 2734 read_unlock_bh(&tipc_net_lock); 2735 return res; 2736 } 2737 2738 static void link_print(struct tipc_link *l_ptr, const char *str) 2739 { 2740 pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name); 2741 2742 if (link_working_unknown(l_ptr)) 2743 pr_cont(":WU\n"); 2744 else if (link_reset_reset(l_ptr)) 2745 pr_cont(":RR\n"); 2746 else if (link_reset_unknown(l_ptr)) 2747 pr_cont(":RU\n"); 2748 else if (link_working_working(l_ptr)) 2749 pr_cont(":WW\n"); 2750 else 2751 pr_cont("\n"); 2752 } 2753