1 /* 2 * net/tipc/link.c: TIPC link code 3 * 4 * Copyright (c) 1996-2007, 2012, Ericsson AB 5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
35 */ 36 37 #include "core.h" 38 #include "link.h" 39 #include "port.h" 40 #include "name_distr.h" 41 #include "discover.h" 42 #include "config.h" 43 44 #include <linux/pkt_sched.h> 45 46 /* 47 * Error message prefixes 48 */ 49 static const char *link_co_err = "Link changeover error, "; 50 static const char *link_rst_msg = "Resetting link "; 51 static const char *link_unk_evt = "Unknown link event "; 52 53 /* 54 * Out-of-range value for link session numbers 55 */ 56 #define INVALID_SESSION 0x10000 57 58 /* 59 * Link state events: 60 */ 61 #define STARTING_EVT 856384768 /* link processing trigger */ 62 #define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */ 63 #define TIMEOUT_EVT 560817u /* link timer expired */ 64 65 /* 66 * The following two 'message types' is really just implementation 67 * data conveniently stored in the message header. 68 * They must not be considered part of the protocol 69 */ 70 #define OPEN_MSG 0 71 #define CLOSED_MSG 1 72 73 /* 74 * State value stored in 'exp_msg_count' 75 */ 76 #define START_CHANGEOVER 100000u 77 78 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, 79 struct sk_buff *buf); 80 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf); 81 static int link_recv_changeover_msg(struct tipc_link **l_ptr, 82 struct sk_buff **buf); 83 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance); 84 static int link_send_sections_long(struct tipc_port *sender, 85 struct iovec const *msg_sect, 86 unsigned int len, u32 destnode); 87 static void link_state_event(struct tipc_link *l_ptr, u32 event); 88 static void link_reset_statistics(struct tipc_link *l_ptr); 89 static void link_print(struct tipc_link *l_ptr, const char *str); 90 static void link_start(struct tipc_link *l_ptr); 91 static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf); 92 static void tipc_link_send_sync(struct tipc_link *l); 93 static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf); 94 
/*
 * Simple link routines
 */

/* Round @i up to the next multiple of 4 */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

/*
 * link_init_max_pkt - initialize max packet negotiation state from bearer MTU
 *
 * The negotiation target is the bearer MTU rounded down to a multiple of 4,
 * capped at MAX_MSG_SIZE; the max packet actually used starts at the lesser
 * of the target and MAX_PKT_DEFAULT until probing raises it.
 */
static void link_init_max_pkt(struct tipc_link *l_ptr)
{
	u32 max_pkt;

	max_pkt = (l_ptr->b_ptr->mtu & ~3);
	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	l_ptr->max_pkt_probes = 0;
}

/* Sequence number of next packet to send (head of next_out, if any) */
static u32 link_next_sent(struct tipc_link *l_ptr)
{
	if (l_ptr->next_out)
		return buf_seqno(l_ptr->next_out);
	return mod(l_ptr->next_out_no);
}

/* Sequence number of most recently sent packet */
static u32 link_last_sent(struct tipc_link *l_ptr)
{
	return mod(link_next_sent(l_ptr) - 1);
}

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l_ptr)
{
	return	(l_ptr->owner->active_links[0] == l_ptr) ||
		(l_ptr->owner->active_links[1] == l_ptr);
}

/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 *
 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
 * with tipc_link_delete().  (There is no risk that the node will be deleted by
 * another thread because tipc_link_delete() always cancels the link timer before
 * tipc_node_delete() is called.)
 */
static void link_timeout(struct tipc_link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
	l_ptr->stats.queue_sz_counts++;

	if (l_ptr->first_out) {
		struct tipc_msg *msg = buf_msg(l_ptr->first_out);
		u32 length = msg_size(msg);

		/* for a first fragment, profile the wrapped message's length */
		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */

	link_state_event(l_ptr, TIMEOUT_EVT);

	if (l_ptr->next_out)
		tipc_link_push_queue(l_ptr);

	tipc_node_unlock(l_ptr->owner);
}

/* (Re)arm the link timer to fire after @time */
static void link_set_timer(struct tipc_link *l_ptr, u32 time)
{
	k_start_timer(&l_ptr->timer, time);
}

/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link, or NULL on failure (attempt to create a third
 * link to the node, a second link on the same bearer, or out of memory).
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr)
{
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= 2) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish third link to %s\n", addr_string);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}

	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
		tipc_node(tipc_own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
		/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->peer_session = INVALID_SESSION;
	l_ptr->b_ptr = b_ptr;
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	/* Pre-build the link protocol (RESET/STATE) message header */
	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tipc_random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);

	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);

	link_init_max_pkt(l_ptr);

	l_ptr->next_out_no = 1;
	INIT_LIST_HEAD(&l_ptr->waiting_ports);

	link_reset_statistics(l_ptr);

	tipc_node_attach_link(n_ptr, l_ptr);

	k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
	list_add_tail(&l_ptr->link_list, &b_ptr->links);
	/* defer startup to tasklet context; fires STARTING_EVT via link_start */
	tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);

	return l_ptr;
}

/**
 * tipc_link_delete - delete a link
 * @l_ptr: pointer to link
 *
 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
 * This routine must not grab the node lock until after link timer cancellation
 * to avoid a potential deadlock situation.
 */
void tipc_link_delete(struct tipc_link *l_ptr)
{
	if (!l_ptr) {
		pr_err("Attempt to delete non-existent link\n");
		return;
	}

	k_cancel_timer(&l_ptr->timer);

	tipc_node_lock(l_ptr->owner);
	tipc_link_reset(l_ptr);
	tipc_node_detach_link(l_ptr->owner, l_ptr);
	tipc_link_stop(l_ptr);
	list_del_init(&l_ptr->link_list);
	tipc_node_unlock(l_ptr->owner);
	k_term_timer(&l_ptr->timer);
	kfree(l_ptr);
}

/* Deferred link startup: delivers STARTING_EVT to the FSM under node lock */
static void link_start(struct tipc_link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);
	link_state_event(l_ptr, STARTING_EVT);
	tipc_node_unlock(l_ptr->owner);
}

/**
 * link_schedule_port - schedule port for deferred sending
 * @l_ptr: pointer to link
 * @origport: reference to sending port
 * @sz: amount of data to be sent
 *
 * Schedules port for renewed sending of messages after link congestion
 * has abated.
 */
static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
{
	struct tipc_port *p_ptr;

	spin_lock_bh(&tipc_port_list_lock);
	p_ptr = tipc_port_lock(origport);
	if (p_ptr) {
		if (!p_ptr->wakeup)
			goto exit;
		if (!list_empty(&p_ptr->wait_list))
			goto exit;
		p_ptr->congested = 1;
		p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
		list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
		l_ptr->stats.link_congs++;
exit:
		tipc_port_unlock(p_ptr);
	}
	spin_unlock_bh(&tipc_port_list_lock);
	return -ELINKCONG;
}

/**
 * tipc_link_wakeup_ports - wake up ports waiting on congested link
 * @l_ptr: pointer to link
 * @all: if non-zero, wake all waiting ports regardless of send window
 */
void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
{
	struct tipc_port *p_ptr;
	struct tipc_port *temp_p_ptr;
	int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;

	if (all)
		win = 100000;
	if (win <= 0)
		return;
	/* trylock: give up rather than risk deadlock with the port list lock */
	if (!spin_trylock_bh(&tipc_port_list_lock))
		return;
	if (link_congested(l_ptr))
		goto exit;
	list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
				 wait_list) {
		if (win <= 0)
			break;
		list_del_init(&p_ptr->wait_list);
		spin_lock_bh(p_ptr->lock);
		p_ptr->congested = 0;
		p_ptr->wakeup(p_ptr);
		win -= p_ptr->waiting_pkts;
		spin_unlock_bh(p_ptr->lock);
	}

exit:
	spin_unlock_bh(&tipc_port_list_lock);
}

/**
 * link_release_outqueue - purge link's outbound message queue
 * @l_ptr: pointer to link
 */
static void link_release_outqueue(struct tipc_link *l_ptr)
{
	kfree_skb_list(l_ptr->first_out);
	l_ptr->first_out = NULL;
	l_ptr->out_queue_size = 0;
}

/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_head);
	l_ptr->reasm_head = NULL;
	l_ptr->reasm_tail = NULL;
}

/**
 * tipc_link_stop - purge all inbound and outbound messages associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_stop(struct tipc_link *l_ptr)
{
	kfree_skb_list(l_ptr->oldest_deferred_in);
	kfree_skb_list(l_ptr->first_out);
	tipc_link_reset_fragments(l_ptr);
	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
}

void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	/* nothing more to do if link was already down */
	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);

	if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
	    l_ptr->owner->permit_changeover) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues: */
	link_release_outqueue(l_ptr);
	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
	kfree_skb_list(l_ptr->oldest_deferred_in);
	if (!list_empty(&l_ptr->waiting_ports))
		tipc_link_wakeup_ports(l_ptr, 1);

	l_ptr->retransm_queue_head = 0;
	l_ptr->retransm_queue_size = 0;
	l_ptr->last_out = NULL;
	l_ptr->first_out = NULL;
	l_ptr->next_out = NULL;
	l_ptr->unacked_window = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->deferred_inqueue_sz = 0;
	l_ptr->oldest_deferred_in = NULL;
	l_ptr->newest_deferred_in = NULL;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}


/* Mark link up: reset receive state, register link with node and bearer */
static void link_activate(struct tipc_link *l_ptr)
{
	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
	tipc_node_link_up(l_ptr->owner, l_ptr);
	tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
}

/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 */
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
	struct tipc_link *other;
	u32 cont_intv = l_ptr->continuity_interval;

	if (!l_ptr->started && (event != STARTING_EVT))
		return;		/* Not yet. */

	/* Check whether changeover is going on */
	if (l_ptr->exp_msg_count) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				/* traffic seen since last timeout: stay WW */
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			/* no traffic: probe peer, shorten timer interval */
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer\n", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in WW state\n", link_unk_evt, event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer while probing\n",
				link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				/* peer answered: back to WORKING_WORKING */
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				tipc_link_send_proto_msg(l_ptr, STATE_MSG,
							 1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				pr_warn("%s<%s>, peer not responding\n",
					link_rst_msg, l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_send_proto_msg(l_ptr, RESET_MSG,
							 0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			pr_err("%s%u in WU state\n", link_unk_evt, event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			/* don't activate while the other link is still probing */
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_send_sync(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->started = 1;
			/* fall through */
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RU state\n", link_unk_evt, event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			/* don't activate while the other link is still probing */
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_send_sync(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RR state\n", link_unk_evt, event);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}

/*
 * link_bundle_buf(): Append contents of a buffer to
 * the tail of an existing one.
 */
static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler,
			   struct sk_buff *buf)
{
	struct tipc_msg *bundler_msg = buf_msg(bundler);
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 bundle_size = msg_size(bundler_msg);
	u32 to_pos = align(bundle_size);
	u32 pad = to_pos - bundle_size;

	if (msg_user(bundler_msg) != MSG_BUNDLER)
		return 0;
	if (msg_type(bundler_msg) != OPEN_MSG)
		return 0;
	if (skb_tailroom(bundler) < (pad + size))
		return 0;
	if (l_ptr->max_pkt < (to_pos + size))
		return 0;

	/* append at the 4-byte-aligned position; ownership of buf ends here */
	skb_put(bundler, pad + size);
	skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
	msg_set_size(bundler_msg, to_pos + size);
	msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
	kfree_skb(buf);
	l_ptr->stats.sent_bundled++;
	return 1;
}

/* Append @buf to the link's send queue, stamping ack and sequence numbers */
static void link_add_to_outqueue(struct tipc_link *l_ptr,
				 struct sk_buff *buf,
				 struct tipc_msg *msg)
{
	u32 ack = mod(l_ptr->next_in_no - 1);
	u32 seqno = mod(l_ptr->next_out_no++);

	msg_set_word(msg, 2, ((ack << 16) | seqno));
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	buf->next = NULL;
	if (l_ptr->first_out) {
		l_ptr->last_out->next = buf;
		l_ptr->last_out = buf;
	} else
		l_ptr->first_out = l_ptr->last_out = buf;

	l_ptr->out_queue_size++;
	if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
		l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
}

/* Append a buffer chain to the send queue, tagging each with @long_msgno */
static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
				       struct sk_buff *buf_chain,
				       u32 long_msgno)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	if (!l_ptr->next_out)
		l_ptr->next_out = buf_chain;
	while (buf_chain) {
		buf = buf_chain;
		buf_chain = buf_chain->next;

		msg = buf_msg(buf);
		msg_set_long_msgno(msg, long_msgno);
		link_add_to_outqueue(l_ptr, buf, msg);
	}
}

/*
 * tipc_link_send_buf() is the 'full path' for messages, called from
 * inside TIPC when the 'fast path' in tipc_send_buf
 * has failed, and from link_send()
 */
int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 dsz = msg_data_sz(msg);
	u32 queue_size = l_ptr->out_queue_size;
	u32 imp = tipc_msg_tot_importance(msg);
	u32 queue_limit = l_ptr->queue_limit[imp];
	u32 max_packet = l_ptr->max_pkt;

	/* Match msg importance against queue limits: */
	if (unlikely(queue_size >= queue_limit)) {
		if (imp <= TIPC_CRITICAL_IMPORTANCE) {
			link_schedule_port(l_ptr, msg_origport(msg), size);
			kfree_skb(buf);
			return -ELINKCONG;
		}
		kfree_skb(buf);
		if (imp > CONN_MANAGER) {
			pr_warn("%s<%s>, send queue full", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
		}
		return dsz;
	}

	/* Fragmentation needed ? */
	if (size > max_packet)
		return link_send_long_buf(l_ptr, buf);

	/* Packet can be queued or sent. */
	if (likely(!link_congested(l_ptr))) {
		link_add_to_outqueue(l_ptr, buf, msg);

		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->unacked_window = 0;
		return dsz;
	}
	/* Congestion: can message be bundled ? */
	if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
	    (msg_user(msg) != MSG_FRAGMENTER)) {

		/* Try adding message to an existing bundle */
		if (l_ptr->next_out &&
		    link_bundle_buf(l_ptr, l_ptr->last_out, buf))
			return dsz;

		/* Try creating a new bundle */
		if (size <= max_packet * 2 / 3) {
			struct sk_buff *bundler = tipc_buf_acquire(max_packet);
			struct tipc_msg bundler_hdr;

			if (bundler) {
				tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
					      INT_H_SIZE, l_ptr->addr);
				skb_copy_to_linear_data(bundler, &bundler_hdr,
							INT_H_SIZE);
				skb_trim(bundler, INT_H_SIZE);
				link_bundle_buf(l_ptr, bundler, buf);
				buf = bundler;
				msg = buf_msg(buf);
				l_ptr->stats.sent_bundles++;
			}
		}
	}
	if (!l_ptr->next_out)
		l_ptr->next_out = buf;
	link_add_to_outqueue(l_ptr, buf, msg);
	return dsz;
}

/*
 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
 * not been selected yet, and the owner node is not locked
 * Called by TIPC internal users, e.g. the name distributor
 */
int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	int res = -ELINKCONG;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector & 1];
		if (l_ptr)
			res = tipc_link_send_buf(l_ptr, buf);
		else
			kfree_skb(buf);
		tipc_node_unlock(n_ptr);
	} else {
		kfree_skb(buf);
	}
	read_unlock_bh(&tipc_net_lock);
	return res;
}

/*
 * tipc_link_send_sync - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 *
 * Called with node locked
 */
static void tipc_link_send_sync(struct tipc_link *l)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	buf = tipc_buf_acquire(INT_H_SIZE);
	if (!buf)
		return;

	msg = buf_msg(buf);
	tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr);
	msg_set_last_bcast(msg, l->owner->bclink.acked);
	link_add_chain_to_outqueue(l, buf, 0);
	tipc_link_push_queue(l);
}

/*
 * tipc_link_recv_sync - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}

/*
 * tipc_link_send_names - send name table entries to new neighbor
 *
 * Send routine for bulk delivery of name table messages when contact
 * with a new neighbor occurs. No link congestion checking is performed
 * because name table messages *must* be delivered. The messages must be
 * small enough not to require fragmentation.
 * Called without any locks held.
 */
void tipc_link_send_names(struct list_head *message_list, u32 dest)
{
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	struct sk_buff *buf;
	struct sk_buff *temp_buf;

	if (list_empty(message_list))
		return;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[0];
		if (l_ptr) {
			/* convert circular list to linear list */
			((struct sk_buff *)message_list->prev)->next = NULL;
			link_add_chain_to_outqueue(l_ptr,
				(struct sk_buff *)message_list->next, 0);
			tipc_link_push_queue(l_ptr);
			INIT_LIST_HEAD(message_list);
		}
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);

	/* discard the messages if they couldn't be sent */
	list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
		list_del((struct list_head *)buf);
		kfree_skb(buf);
	}
}

/*
 * link_send_buf_fast: Entry for data messages where the
 * destination link is known and the header is complete,
 * inclusive total message length. Very time critical.
 * Link is locked. Returns user data length.
 */
static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
			      u32 *used_max_pkt)
{
	struct tipc_msg *msg = buf_msg(buf);
	int res = msg_data_sz(msg);

	if (likely(!link_congested(l_ptr))) {
		if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
			link_add_to_outqueue(l_ptr, buf, msg);
			tipc_bearer_send(l_ptr->b_ptr, buf,
					 &l_ptr->media_addr);
			l_ptr->unacked_window = 0;
			return res;
		}
		else
			/* report current max packet back to the caller */
			*used_max_pkt = l_ptr->max_pkt;
	}
	return tipc_link_send_buf(l_ptr, buf);	/* All other cases */
}

/*
 * tipc_link_send_sections_fast: Entry for messages where the
 * destination processor is known and the header is complete,
 * except for total message length.
 * Returns user data length or errno.
 */
int tipc_link_send_sections_fast(struct tipc_port *sender,
				 struct iovec const *msg_sect,
				 unsigned int len, u32 destaddr)
{
	struct tipc_msg *hdr = &sender->phdr;
	struct tipc_link *l_ptr;
	struct sk_buff *buf;
	struct tipc_node *node;
	int res;
	u32 selector = msg_origport(hdr) & 1;

again:
	/*
	 * Try building message using port's max_pkt hint.
	 * (Must not hold any locks while building message.)
	 */
	res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf);
	/* Exit if build request was invalid */
	if (unlikely(res < 0))
		return res;

	read_lock_bh(&tipc_net_lock);
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[selector];
		if (likely(l_ptr)) {
			if (likely(buf)) {
				res = link_send_buf_fast(l_ptr, buf,
							 &sender->max_pkt);
exit:
				tipc_node_unlock(node);
				read_unlock_bh(&tipc_net_lock);
				return res;
			}

			/* Exit if link (or bearer) is congested */
			if (link_congested(l_ptr)) {
				res = link_schedule_port(l_ptr,
							 sender->ref, res);
				goto exit;
			}

			/*
			 * Message size exceeds max_pkt hint; update hint,
			 * then re-try fast path or fragment the message
			 */
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			read_unlock_bh(&tipc_net_lock);


			if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
				goto again;

			return link_send_sections_long(sender, msg_sect, len,
						       destaddr);
		}
		tipc_node_unlock(node);
	}
	read_unlock_bh(&tipc_net_lock);

	/* Couldn't find a link to the destination node */
	if (buf)
		return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
	if (res >= 0)
		return tipc_port_reject_sections(sender, hdr, msg_sect,
						 len, TIPC_ERR_NO_NODE);
	return res;
}

/*
 * link_send_sections_long(): Entry for long messages where the
 * destination node is known
 * and the header is complete,
 * inclusive total message length.
 * Link and bearer congestion status have been checked to be ok,
 * and are ignored if they change.
 *
 * Note that fragments do not use the full link MTU so that they won't have
 * to undergo refragmentation if link changeover causes them to be sent
 * over another link with an additional tunnel header added as prefix.
 * (Refragmentation will still occur if the other link has a smaller MTU.)
 *
 * Returns user data length or errno.
 */
static int link_send_sections_long(struct tipc_port *sender,
				   struct iovec const *msg_sect,
				   unsigned int len, u32 destaddr)
{
	struct tipc_link *l_ptr;
	struct tipc_node *node;
	struct tipc_msg *hdr = &sender->phdr;
	u32 dsz = len;
	u32 max_pkt, fragm_sz, rest;
	struct tipc_msg fragm_hdr;
	struct sk_buff *buf, *buf_chain, *prev;
	u32 fragm_crs, fragm_rest, hsz, sect_rest;
	const unchar __user *sect_crs;
	int curr_sect;
	u32 fragm_no;
	int res = 0;

again:
	/* Restarted from scratch whenever the link's max_pkt turns out to be
	 * smaller than the value the chain was built for (see check below).
	 */
	fragm_no = 1;
	max_pkt = sender->max_pkt - INT_H_SIZE;
	/* leave room for tunnel header in case of link changeover */
	fragm_sz = max_pkt - INT_H_SIZE;
	/* leave room for fragmentation header in each fragment */
	rest = dsz;
	fragm_crs = 0;
	fragm_rest = 0;
	sect_rest = 0;
	sect_crs = NULL;
	curr_sect = -1;

	/* Prepare reusable fragment header */
	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, msg_destnode(hdr));
	msg_set_size(&fragm_hdr, max_pkt);
	msg_set_fragm_no(&fragm_hdr, 1);

	/* Prepare header of first fragment */
	buf_chain = buf = tipc_buf_acquire(max_pkt);
	if (!buf)
		return -ENOMEM;
	buf->next = NULL;
	skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
	hsz = msg_hdr_sz(hdr);
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);

	/* Chop up message: copy user data section by section into the
	 * fragment chain, starting a new fragment whenever the current
	 * one fills up.
	 */
	fragm_crs = INT_H_SIZE + hsz;
	fragm_rest = fragm_sz - hsz;

	do {		/* For all sections */
		u32 sz;

		if (!sect_rest) {
			/* advance to next iovec section */
			sect_rest = msg_sect[++curr_sect].iov_len;
			sect_crs = msg_sect[curr_sect].iov_base;
		}

		/* copy whichever is smaller: what's left of the section,
		 * or what's left of the current fragment
		 */
		if (sect_rest < fragm_rest)
			sz = sect_rest;
		else
			sz = fragm_rest;

		if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
			res = -EFAULT;
error:
			kfree_skb_list(buf_chain);
			return res;
		}
		sect_crs += sz;
		sect_rest -= sz;
		fragm_crs += sz;
		fragm_rest -= sz;
		rest -= sz;

		if (!fragm_rest && rest) {

			/* Initiate new fragment: */
			if (rest <= fragm_sz) {
				fragm_sz = rest;
				msg_set_type(&fragm_hdr, LAST_FRAGMENT);
			} else {
				msg_set_type(&fragm_hdr, FRAGMENT);
			}
			msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
			msg_set_fragm_no(&fragm_hdr, ++fragm_no);
			prev = buf;
			buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
			if (!buf) {
				res = -ENOMEM;
				goto error;
			}

			buf->next = NULL;
			prev->next = buf;
			skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
			fragm_crs = INT_H_SIZE;
			fragm_rest = fragm_sz;
		}
	} while (rest > 0);

	/*
	 * Now we have a buffer chain. Select a link and check
	 * that packet size is still OK
	 */
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[sender->ref & 1];
		if (!l_ptr) {
			tipc_node_unlock(node);
			goto reject;
		}
		if (l_ptr->max_pkt < max_pkt) {
			/* MTU shrank while we were building: rebuild the
			 * whole chain with the smaller fragment size
			 */
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			kfree_skb_list(buf_chain);
			goto again;
		}
	} else {
reject:
		kfree_skb_list(buf_chain);
		return tipc_port_reject_sections(sender, hdr, msg_sect,
						 len, TIPC_ERR_NO_NODE);
	}

	/* Append chain of fragments to send queue & send them */
	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);
	tipc_node_unlock(node);
	return dsz;
}

/*
 * tipc_link_push_packet: Push one unsent packet to the media.
 * Returns 0 if a packet was sent, non-zero if there was nothing to send
 * (or the send window is full).
 */
u32 tipc_link_push_packet(struct tipc_link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	u32 r_q_size = l_ptr->retransm_queue_size;
	u32 r_q_head = l_ptr->retransm_queue_head;

	/* Step to position where retransmission failed, if any, */
	/* consider that buffers may have been released in meantime */
	if (r_q_size && buf) {
		u32 last = lesser(mod(r_q_head + r_q_size),
				  link_last_sent(l_ptr));
		u32 first = buf_seqno(buf);

		while (buf && less(first, r_q_head)) {
			first = mod(first + 1);
			buf = buf->next;
		}
		l_ptr->retransm_queue_head = r_q_head = first;
		l_ptr->retransm_queue_size = r_q_size = mod(last - first);
	}

	/* Continue retransmission now, if there is anything: */
	if (r_q_size && buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->retransm_queue_head = mod(++r_q_head);
		l_ptr->retransm_queue_size = --r_q_size;
		l_ptr->stats.retransmitted++;
		return 0;
	}

	/* Send deferred protocol message, if any: */
	buf = l_ptr->proto_msg_queue;
	if (buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->unacked_window = 0;
		kfree_skb(buf);
		l_ptr->proto_msg_queue = NULL;
		return 0;
	}

	/* Send one deferred data message, if send window not full: */
	buf = l_ptr->next_out;
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		u32 next = msg_seqno(msg);
		u32 first = buf_seqno(l_ptr->first_out);

		if (mod(next - first) < l_ptr->queue_limit[0]) {
			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
			tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
			if (msg_user(msg) == MSG_BUNDLER)
				msg_set_type(msg, CLOSED_MSG);
			l_ptr->next_out = buf->next;
			return 0;
		}
	}
	return 1;
}

/*
 * push_queue(): push out the unsent messages of a link where
 * congestion has abated.
 * Node is locked.
 */
void tipc_link_push_queue(struct tipc_link *l_ptr)
{
	u32 res;

	/* Keep pushing until tipc_link_push_packet() reports that nothing
	 * more can be sent (non-zero return)
	 */
	do {
		res = tipc_link_push_packet(l_ptr);
	} while (!res);
}

/*
 * link_reset_all - reset all links to the node identified by 'addr'.
 * The address is passed as unsigned long because this runs as a deferred
 * handler scheduled via tipc_k_signal() (see link_retransmit_failure()).
 */
static void link_reset_all(unsigned long addr)
{
	struct tipc_node *n_ptr;
	char addr_string[16];
	u32 i;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find((u32)addr);
	if (!n_ptr) {
		read_unlock_bh(&tipc_net_lock);
		return;	/* node no longer exists */
	}

	tipc_node_lock(n_ptr);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, n_ptr->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (n_ptr->links[i]) {
			link_print(n_ptr->links[i], "Resetting link\n");
			tipc_link_reset(n_ptr->links[i]);
		}
	}

	tipc_node_unlock(n_ptr);
	read_unlock_bh(&tipc_net_lock);
}

/*
 * link_retransmit_failure - handle a link that failed too many retransmits.
 * A unicast link (l_ptr->addr != 0) is simply reset; for the broadcast
 * link, diagnostic state is dumped and a full reset of all links to the
 * last node to acknowledge is scheduled via tipc_k_signal().
 */
static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);

	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u, ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to();
		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d, Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		/* Defer the reset: we may be deep in bclink processing here */
		tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);

		tipc_node_unlock(n_ptr);

		l_ptr->stale_count = 0;
	}
}

/*
 * tipc_link_retransmit - retransmit up to 'retransmits' buffers starting
 * at 'buf'. Triggers link_retransmit_failure() if the same message has
 * been retransmitted more than 100 times without progress.
 */
void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!buf)
		return;

	msg = buf_msg(buf);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransmitted == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, buf);
			return;
		}
	} else {
		l_ptr->last_retransmitted = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	/* Stop at the first never-sent buffer (next_out) */
	while (retransmits && (buf != l_ptr->next_out) && buf) {
		msg = buf_msg(buf);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		buf = buf->next;
		retransmits--;
		l_ptr->stats.retransmitted++;
	}

	l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
}

/**
 * link_insert_deferred_queue - insert deferred messages back into receive chain
 *
 * If the oldest deferred packet is now the expected one, the whole deferred
 * queue is spliced in front of 'buf' for in-order processing.
 */
static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
						  struct sk_buff *buf)
{
	u32 seq_no;

	if (l_ptr->oldest_deferred_in == NULL)
		return buf;

	seq_no = buf_seqno(l_ptr->oldest_deferred_in);
	if (seq_no == mod(l_ptr->next_in_no)) {
		l_ptr->newest_deferred_in->next = buf;
		buf = l_ptr->oldest_deferred_in;
		l_ptr->oldest_deferred_in = NULL;
		l_ptr->deferred_inqueue_sz = 0;
	}
	return buf;
}

/**
 * link_recv_buf_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 *
 * Returns non-zero if the buffer is acceptable, 0 otherwise.
 */
static int link_recv_buf_validate(struct sk_buff *buf)
{
	/* Minimum header size indexed by data message type */
	static u32 min_data_hdr_size[8] = {
		SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
		MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
		};

	struct tipc_msg *msg;
	u32 tipc_hdr[2];
	u32 size;
	u32 hdr_size;
	u32 min_hdr_size;

	if (unlikely(buf->len < MIN_H_SIZE))
		return 0;

	/* Copy out the first two header words without assuming linearity */
	msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
	if (msg == NULL)
		return 0;

	if (unlikely(msg_version(msg) != TIPC_VERSION))
		return 0;

	size = msg_size(msg);
	hdr_size = msg_hdr_sz(msg);
	min_hdr_size = msg_isdata(msg) ?
		min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;

	if (unlikely((hdr_size < min_hdr_size) ||
		     (size < hdr_size) ||
		     (buf->len < size) ||
		     (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
		return 0;

	/* Make sure the full header is in the linear part of the buffer */
	return pskb_may_pull(buf, hdr_size);
}

/**
 * tipc_recv_msg - process TIPC messages arriving from off-node
 * @head: pointer to message buffer chain
 * @tb_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
{
	read_lock_bh(&tipc_net_lock);
	while (head) {
		struct tipc_node *n_ptr;
		struct tipc_link *l_ptr;
		struct sk_buff *crs;
		struct sk_buff *buf = head;
		struct tipc_msg *msg;
		u32 seq_no;
		u32 ackd;
		u32 released = 0;
		int type;

		head = head->next;

		/* Ensure bearer is still enabled */
		if (unlikely(!b_ptr->active))
			goto discard;

		/* Ensure message is well-formed */
		if (unlikely(!link_recv_buf_validate(buf)))
			goto discard;

		/* Ensure message data is a single contiguous unit */
		if (unlikely(skb_linearize(buf)))
			goto discard;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(buf);

		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) == LINK_CONFIG)
				tipc_disc_recv_msg(buf, b_ptr);
			else
				tipc_bclink_recv_pkt(buf);
			continue;
		}

		/* Discard unicast link messages destined for another node */
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tipc_own_addr)))
			goto discard;

		/* Locate neighboring node that sent message */
		n_ptr = tipc_node_find(msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;
		tipc_node_lock(n_ptr);

		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr))
			goto unlock_discard;

		/* Verify that communication with node is currently allowed */
		if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
		    msg_user(msg) == LINK_PROTOCOL &&
		    (msg_type(msg) == RESET_MSG ||
		     msg_type(msg) == ACTIVATE_MSG) &&
		    !msg_redundant_link(msg))
			n_ptr->block_setup &= ~WAIT_PEER_DOWN;

		if (n_ptr->block_setup)
			goto unlock_discard;

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
		if (n_ptr->bclink.recv_permitted)
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		/* Free all sent-but-unacked buffers up to 'ackd' */
		crs = l_ptr->first_out;
		while ((crs != l_ptr->next_out) &&
		       less_eq(buf_seqno(crs), ackd)) {
			struct sk_buff *next = crs->next;

			kfree_skb(crs);
			crs = next;
			released++;
		}
		if (released) {
			l_ptr->first_out = crs;
			l_ptr->out_queue_size -= released;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(l_ptr->next_out))
			tipc_link_push_queue(l_ptr);
		if (unlikely(!list_empty(&l_ptr->waiting_ports)))
			tipc_link_wakeup_ports(l_ptr, 0);
		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}

		/* Now (finally!) process the incoming message */
protocol_check:
		if (unlikely(!link_working_working(l_ptr))) {
			if (msg_user(msg) == LINK_PROTOCOL) {
				link_recv_proto_msg(l_ptr, buf);
				head = link_insert_deferred_queue(l_ptr, head);
				tipc_node_unlock(n_ptr);
				continue;
			}

			/* Traffic message. Conditionally activate link */
			link_state_event(l_ptr, TRAFFIC_MSG_EVT);

			if (link_working_working(l_ptr)) {
				/* Re-insert buffer in front of queue */
				buf->next = head;
				head = buf;
				tipc_node_unlock(n_ptr);
				continue;
			}
			goto unlock_discard;
		}

		/* Link is now in state WORKING_WORKING */
		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
			link_handle_out_of_seq_msg(l_ptr, buf);
			head = link_insert_deferred_queue(l_ptr, head);
			tipc_node_unlock(n_ptr);
			continue;
		}
		l_ptr->next_in_no++;
		if (unlikely(l_ptr->oldest_deferred_in))
			head = link_insert_deferred_queue(l_ptr, head);
deliver:
		if (likely(msg_isdata(msg))) {
			tipc_node_unlock(n_ptr);
			tipc_port_recv_msg(buf);
			continue;
		}
		/* Dispatch non-data message on its user field; 'ret' lives at
		 * switch scope and is only set in the MSG_FRAGMENTER case
		 */
		switch (msg_user(msg)) {
			int ret;
		case MSG_BUNDLER:
			l_ptr->stats.recv_bundles++;
			l_ptr->stats.recv_bundled += msg_msgcnt(msg);
			tipc_node_unlock(n_ptr);
			tipc_link_recv_bundle(buf);
			continue;
		case NAME_DISTRIBUTOR:
			n_ptr->bclink.recv_permitted = true;
			tipc_node_unlock(n_ptr);
			tipc_named_recv(buf);
			continue;
		case BCAST_PROTOCOL:
			tipc_link_recv_sync(n_ptr, buf);
			tipc_node_unlock(n_ptr);
			continue;
		case CONN_MANAGER:
			tipc_node_unlock(n_ptr);
			tipc_port_recv_proto_msg(buf);
			continue;
		case MSG_FRAGMENTER:
			l_ptr->stats.recv_fragments++;
			ret = tipc_link_recv_fragment(&l_ptr->reasm_head,
						      &l_ptr->reasm_tail,
						      &buf);
			if (ret == LINK_REASM_COMPLETE) {
				l_ptr->stats.recv_fragmented++;
				msg = buf_msg(buf);
				/* reassembled message: deliver it as if it
				 * had arrived whole
				 */
				goto deliver;
			}
			if (ret == LINK_REASM_ERROR)
				tipc_link_reset(l_ptr);
			tipc_node_unlock(n_ptr);
			continue;
		case CHANGEOVER_PROTOCOL:
			type = msg_type(msg);
			if (link_recv_changeover_msg(&l_ptr, &buf)) {
				/* l_ptr and buf now refer to the original
				 * link/message extracted from the tunnel
				 */
				msg = buf_msg(buf);
				seq_no = msg_seqno(msg);
				if (type == ORIGINAL_MSG)
					goto deliver;
				goto protocol_check;
			}
			break;
		default:
			kfree_skb(buf);
			buf = NULL;
			break;
		}
		tipc_node_unlock(n_ptr);
		tipc_net_route_msg(buf);
		continue;
unlock_discard:

		tipc_node_unlock(n_ptr);
discard:
		kfree_skb(buf);
	}
	read_unlock_bh(&tipc_net_lock);
}

/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Keeps the queue ordered by sequence number; duplicates are freed.
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
			struct sk_buff *buf)
{
	struct sk_buff *queue_buf;
	struct sk_buff **prev;
	u32 seq_no = buf_seqno(buf);

	buf->next = NULL;

	/* Empty queue ? */
	if (*head == NULL) {
		*head = *tail = buf;
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(*tail), seq_no)) {
		(*tail)->next = buf;
		*tail = buf;
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	prev = head;
	queue_buf = *head;
	for (;;) {
		u32 curr_seqno = buf_seqno(queue_buf);

		if (seq_no == curr_seqno) {
			kfree_skb(buf);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;

		prev = &queue_buf->next;
		queue_buf = queue_buf->next;
	}

	buf->next = queue_buf;
	*prev = buf;
	return 1;
}

/*
 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
 */
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf)
{
	u32 seq_no = buf_seqno(buf);

	/* Protocol messages are unnumbered; handle them directly */
	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
		link_recv_proto_msg(l_ptr, buf);
		return;
	}

	/* Record OOS packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	/*
	 * Discard packet if a duplicate; otherwise add it to deferred queue
	 * and notify
	 * peer of gap as per protocol specification
	 */
	if (less(seq_no, mod(l_ptr->next_in_no))) {
		l_ptr->stats.duplicates++;
		kfree_skb(buf);
		return;
	}

	if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
				&l_ptr->newest_deferred_in, buf)) {
		l_ptr->deferred_inqueue_sz++;
		l_ptr->stats.deferred_recv++;
		/* Send a NACK for roughly every 16th deferred packet */
		if ((l_ptr->deferred_inqueue_sz % 16) == 1)
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
	} else
		l_ptr->stats.duplicates++;
}

/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
			      int probe_msg, u32 gap, u32 tolerance,
			      u32 priority, u32 ack_mtu)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;

	/* Discard any previous message that was deferred due to congestion */
	if (l_ptr->proto_msg_queue) {
		kfree_skb(l_ptr->proto_msg_queue);
		l_ptr->proto_msg_queue = NULL;
	}

	/* Don't send protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		return;

	/* Abort non-RESET send if communication with node is prohibited */
	if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
		return;

	/* Create protocol message with "out-of-sequence" sequence number */
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent());

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

		if (!tipc_link_is_up(l_ptr))
			return;
		if (l_ptr->next_out)
			next_sent = buf_seqno(l_ptr->next_out);
		msg_set_next_sent(msg, next_sent);
		if (l_ptr->oldest_deferred_in) {
			/* report the gap up to the first deferred packet */
			u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		/* NOTE(review): this linkprio value is unconditionally
		 * overwritten by the msg_set_linkprio(msg, l_ptr->priority)
		 * call after the if/else below — confirm intended
		 */
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
		if (probe_msg) {
			u32 mtu = l_ptr->max_pkt;

			/* Binary-search toward the true MTU by padding the
			 * probe halfway between current and target size
			 */
			if ((mtu < l_ptr->max_pkt_target) &&
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				}
				l_ptr->max_pkt_probes++;
			}

			l_ptr->stats.sent_probes++;
		}
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

	/* tell peer whether we still have a working redundant link */
	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);
	msg_set_size(msg, msg_size);

	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	buf->priority = TC_PRIO_CONTROL;

	/* Protocol messages are sent immediately and never queued; the
	 * local copy can be freed right away
	 */
	tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
	l_ptr->unacked_window = 0;
	kfree_skb(buf);
}

/*
 * Receive protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time.
 * The node with lowest address rules
 */
static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	/* Discard protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		goto exit;

	/* record unnumbered packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
		if (tipc_own_addr > msg_prevnode(msg))
			l_ptr->b_ptr->net_plane = msg_net_plane(msg);

	l_ptr->owner->permit_changeover = msg_redundant_link(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
			if (less_eq(msg_session(msg), l_ptr->peer_session))
				break; /* duplicate or old reset: ignore */
		}

		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
						 link_working_unknown(l_ptr))) {
			/*
			 * peer has lost contact -- don't allow peer's links
			 * to reactivate before we recognize loss & clean up
			 */
			l_ptr->owner->block_setup = WAIT_NODE_DOWN;
		}

		link_state_event(l_ptr, RESET_MSG);

		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according other endpoint's values */
		/* NOTE(review): unbounded strcpy of peer-supplied data into
		 * the tail of l_ptr->name — relies on the peer sending a
		 * well-formed interface name; verify length elsewhere
		 */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		max_pkt_info = msg_max_pkt(msg);
		if (max_pkt_info) {
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
			l_ptr->max_pkt = l_ptr->max_pkt_target;
		}

		/* Synchronize broadcast link info, if not done previously */
		if (!tipc_node_is_up(l_ptr->owner)) {
			l_ptr->owner->bclink.last_sent =
				l_ptr->owner->bclink.last_in =
				msg_last_bcast(msg);
			l_ptr->owner->bclink.oos_state = 0;
		}

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		if (msg_type(msg) == ACTIVATE_MSG)
			link_state_event(l_ptr, ACTIVATE_MSG);
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			pr_warn("%s<%s>, priority change %u->%u\n",
				link_rst_msg, l_ptr->name, l_ptr->priority,
				msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}
		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		/* Compute the gap between what peer has sent and what we
		 * have received, to be NACKed back
		 */
		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
			rec_gap = mod(msg_next_sent(msg) -
				      mod(l_ptr->next_in_no));
		}

		max_pkt_ack = msg_max_pkt(msg);
		if (max_pkt_ack > l_ptr->max_pkt) {
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}

		max_pkt_ack = 0;
		if (msg_probe(msg)) {
			l_ptr->stats.recv_probes++;
			/* Acknowledge the probe's padded size back to peer */
			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
				max_pkt_ack = msg_size(msg);
		}

		/* Protocol message before retransmits, reduce loss risk */
		if (l_ptr->owner->bclink.recv_permitted)
			tipc_bclink_update_link_state(l_ptr->owner,
						      msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
						 0, rec_gap, 0, 0, max_pkt_ack);
		}
		if (msg_seq_gap(msg)) {
			/* Peer reported a gap: retransmit from first_out */
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, l_ptr->first_out,
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	kfree_skb(buf);
}


/*
 * tipc_link_tunnel(): Send one message via a link belonging to
 * another bearer. Owner node is locked.
 */
static void tipc_link_tunnel(struct tipc_link *l_ptr,
			     struct tipc_msg *tunnel_hdr, struct tipc_msg *msg,
			     u32 selector)
{
	struct tipc_link *tunnel;
	struct sk_buff *buf;
	u32 length = msg_size(msg);

	tunnel = l_ptr->owner->active_links[selector & 1];
	if (!tipc_link_is_up(tunnel)) {
		pr_warn("%stunnel link no longer available\n", link_co_err);
		return;
	}
	/* Wrap the original message inside a tunnel header */
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	buf = tipc_buf_acquire(length + INT_H_SIZE);
	if (!buf) {
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
		return;
	}
	skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
	tipc_link_send_buf(tunnel, buf);
}



/*
 * changeover(): Send whole message queue via the remaining link
 * Owner node is locked.
 */
void tipc_link_changeover(struct tipc_link *l_ptr)
{
	u32 msgcount = l_ptr->out_queue_size;
	struct sk_buff *crs = l_ptr->first_out;
	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
	struct tipc_msg tunnel_hdr;
	int split_bundles;

	if (!tunnel)
		return;

	if (!l_ptr->owner->permit_changeover) {
		pr_warn("%speer did not permit changeover\n", link_co_err);
		return;
	}

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		      ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	/* Empty send queue: send a single header-only changeover message
	 * so the peer still learns the (zero) message count
	 */
	if (!l_ptr->first_out) {
		struct sk_buff *buf;

		buf = tipc_buf_acquire(INT_H_SIZE);
		if (buf) {
			skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			tipc_link_send_buf(tunnel, buf);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	/* Bundles must be split if both remaining links don't point to the
	 * same endpoint link
	 */
	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

	while (crs) {
		struct tipc_msg *msg = buf_msg(crs);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			/* tunnel each bundled message individually;
			 * 'msgcount' is reused here as the bundle count
			 */
			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
						 msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
					 msg_link_selector(msg));
		}
		crs = crs->next;
	}
}

/*
 * tipc_link_send_duplicate - mirror this link's entire send queue onto
 * 'tunnel' as DUPLICATE_MSG changeover messages. Owner node is locked.
 */
void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
{
	struct sk_buff *iter;
	struct tipc_msg tunnel_hdr;

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		      DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	iter = l_ptr->first_out;
	while (iter) {
		struct sk_buff *outbuf;
		struct tipc_msg *msg = buf_msg(iter);
		u32 length = msg_size(msg);

		if (msg_user(msg) == MSG_BUNDLER)
			msg_set_type(msg, CLOSED_MSG);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Update */
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
		outbuf = tipc_buf_acquire(length + INT_H_SIZE);
		if (outbuf == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
					       length);
		tipc_link_send_buf(tunnel, outbuf);
		if (!tipc_link_is_up(l_ptr))
			return;
		iter = iter->next;
	}
}

/**
 * buf_extract - extracts embedded TIPC message from another message
 * @skb: encapsulating message buffer
 * @from_pos: offset to extract from
 *
 * Returns a new message buffer containing an embedded message. The
 * encapsulating message itself is left unchanged.
 */
static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
{
	struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
	u32 size = msg_size(msg);
	struct sk_buff *eb;

	eb = tipc_buf_acquire(size);
	if (eb)
		skb_copy_to_linear_data(eb, msg, size);
	return eb;
}

/*
 * link_recv_changeover_msg(): Receive tunneled packet sent
 * via other link. Node is locked. Return extracted buffer.
 */
static int link_recv_changeover_msg(struct tipc_link **l_ptr,
				    struct sk_buff **buf)
{
	struct sk_buff *tunnel_buf = *buf;
	struct tipc_link *dest_link;
	struct tipc_msg *msg;
	struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
	u32 msg_typ = msg_type(tunnel_msg);
	u32 msg_count = msg_msgcnt(tunnel_msg);
	u32 bearer_id = msg_bearer_id(tunnel_msg);

	if (bearer_id >= MAX_BEARERS)
		goto exit;
	dest_link = (*l_ptr)->owner->links[bearer_id];
	if (!dest_link)
		goto exit;
	if (dest_link == *l_ptr) {
		pr_err("Unexpected changeover message on link <%s>\n",
		       (*l_ptr)->name);
		goto exit;
	}
	/* Redirect caller to the link the tunneled message belongs to */
	*l_ptr = dest_link;
	msg = msg_get_wrapped(tunnel_msg);

	if (msg_typ == DUPLICATE_MSG) {
		/* drop if we already received the original copy */
		if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
			goto exit;
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf == NULL) {
			pr_warn("%sduplicate msg dropped\n", link_co_err);
			goto exit;
		}
		kfree_skb(tunnel_buf);
		return 1;
	}

	/* First original message ?: */
	if (tipc_link_is_up(dest_link)) {
		pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg,
			dest_link->name);
		tipc_link_reset(dest_link);
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	} else if (dest_link->exp_msg_count == START_CHANGEOVER) {
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	}

	/* Receive original message */
	if (dest_link->exp_msg_count == 0) {
		pr_warn("%sgot too many tunnelled messages\n", link_co_err);
		goto exit;
	}
	dest_link->exp_msg_count--;
	if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
		/* message predates the reset: drop it */
		goto exit;
	} else {
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf != NULL) {
			kfree_skb(tunnel_buf);
			return 1;
		} else {
			pr_warn("%soriginal msg dropped\n", link_co_err);
		}
	}
exit:
	/* no message extracted: free the tunnel buffer, report failure */
	*buf = NULL;
	kfree_skb(tunnel_buf);
	return 0;
}

/*
 * Bundler functionality:
 */
void tipc_link_recv_bundle(struct sk_buff *buf)
{
	u32 msgcount = msg_msgcnt(buf_msg(buf));
	u32 pos = INT_H_SIZE;
	struct sk_buff *obuf;

	/* Extract and route each bundled message in turn */
	while (msgcount--) {
		obuf = buf_extract(buf, pos);
		if (obuf == NULL) {
			pr_warn("Link unable to unbundle message(s)\n");
			break;
		}
		pos += align(msg_size(buf_msg(obuf)));
		tipc_net_route_msg(obuf);
	}
	kfree_skb(buf);
}

/*
 *  Fragmentation/defragmentation:
 */

/*
 * link_send_long_buf: Entry for buffers needing fragmentation.
 * The buffer is complete, inclusive total message length.
 * Returns user data length.
 */
static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	struct sk_buff *buf_chain = NULL;
	struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
	struct tipc_msg *inmsg = buf_msg(buf);
	struct tipc_msg fragm_hdr;
	u32 insize = msg_size(inmsg);
	u32 dsz = msg_data_sz(inmsg);
	unchar *crs = buf->data;
	u32 rest = insize;
	u32 pack_sz = l_ptr->max_pkt;
	u32 fragm_sz = pack_sz - INT_H_SIZE;
	u32 fragm_no = 0;
	u32 destaddr;

	if (msg_short(inmsg))
		destaddr = l_ptr->addr;
	else
		destaddr = msg_destnode(inmsg);

	/* Prepare reusable fragment header: */
	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, destaddr);

	/* Chop up message: */
	while (rest > 0) {
		struct sk_buff *fragm;

		if (rest <= fragm_sz) {
			fragm_sz = rest;
			msg_set_type(&fragm_hdr, LAST_FRAGMENT);
		}
		fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
		if (fragm == NULL) {
			kfree_skb(buf);
			kfree_skb_list(buf_chain);
			return -ENOMEM;
		}
		msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
		fragm_no++;
2256 msg_set_fragm_no(&fragm_hdr, fragm_no); 2257 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE); 2258 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs, 2259 fragm_sz); 2260 buf_chain_tail->next = fragm; 2261 buf_chain_tail = fragm; 2262 2263 rest -= fragm_sz; 2264 crs += fragm_sz; 2265 msg_set_type(&fragm_hdr, FRAGMENT); 2266 } 2267 kfree_skb(buf); 2268 2269 /* Append chain of fragments to send queue & send them */ 2270 l_ptr->long_msg_seq_no++; 2271 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no); 2272 l_ptr->stats.sent_fragments += fragm_no; 2273 l_ptr->stats.sent_fragmented++; 2274 tipc_link_push_queue(l_ptr); 2275 2276 return dsz; 2277 } 2278 2279 /* 2280 * tipc_link_recv_fragment(): Called with node lock on. Returns 2281 * the reassembled buffer if message is complete. 2282 */ 2283 int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail, 2284 struct sk_buff **fbuf) 2285 { 2286 struct sk_buff *frag = *fbuf; 2287 struct tipc_msg *msg = buf_msg(frag); 2288 u32 fragid = msg_type(msg); 2289 bool headstolen; 2290 int delta; 2291 2292 skb_pull(frag, msg_hdr_sz(msg)); 2293 if (fragid == FIRST_FRAGMENT) { 2294 if (*head || skb_unclone(frag, GFP_ATOMIC)) 2295 goto out_free; 2296 *head = frag; 2297 skb_frag_list_init(*head); 2298 return 0; 2299 } else if (*head && 2300 skb_try_coalesce(*head, frag, &headstolen, &delta)) { 2301 kfree_skb_partial(frag, headstolen); 2302 } else { 2303 if (!*head) 2304 goto out_free; 2305 if (!skb_has_frag_list(*head)) 2306 skb_shinfo(*head)->frag_list = frag; 2307 else 2308 (*tail)->next = frag; 2309 *tail = frag; 2310 (*head)->truesize += frag->truesize; 2311 } 2312 if (fragid == LAST_FRAGMENT) { 2313 *fbuf = *head; 2314 *tail = *head = NULL; 2315 return LINK_REASM_COMPLETE; 2316 } 2317 return 0; 2318 out_free: 2319 pr_warn_ratelimited("Link unable to reassemble fragmented message\n"); 2320 kfree_skb(*fbuf); 2321 return LINK_REASM_ERROR; 2322 } 2323 2324 static void 
link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) 2325 { 2326 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) 2327 return; 2328 2329 l_ptr->tolerance = tolerance; 2330 l_ptr->continuity_interval = 2331 ((tolerance / 4) > 500) ? 500 : tolerance / 4; 2332 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4); 2333 } 2334 2335 void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) 2336 { 2337 /* Data messages from this node, inclusive FIRST_FRAGM */ 2338 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window; 2339 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4; 2340 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5; 2341 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6; 2342 /* Transiting data messages,inclusive FIRST_FRAGM */ 2343 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300; 2344 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600; 2345 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900; 2346 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200; 2347 l_ptr->queue_limit[CONN_MANAGER] = 1200; 2348 l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500; 2349 l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000; 2350 /* FRAGMENT and LAST_FRAGMENT packets */ 2351 l_ptr->queue_limit[MSG_FRAGMENTER] = 4000; 2352 } 2353 2354 /** 2355 * link_find_link - locate link by name 2356 * @name: ptr to link name string 2357 * @node: ptr to area to be filled with ptr to associated node 2358 * 2359 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted; 2360 * this also prevents link deletion. 2361 * 2362 * Returns pointer to link (or 0 if invalid link name). 
2363 */ 2364 static struct tipc_link *link_find_link(const char *name, 2365 struct tipc_node **node) 2366 { 2367 struct tipc_link *l_ptr; 2368 struct tipc_node *n_ptr; 2369 int i; 2370 2371 list_for_each_entry(n_ptr, &tipc_node_list, list) { 2372 for (i = 0; i < MAX_BEARERS; i++) { 2373 l_ptr = n_ptr->links[i]; 2374 if (l_ptr && !strcmp(l_ptr->name, name)) 2375 goto found; 2376 } 2377 } 2378 l_ptr = NULL; 2379 n_ptr = NULL; 2380 found: 2381 *node = n_ptr; 2382 return l_ptr; 2383 } 2384 2385 /** 2386 * link_value_is_valid -- validate proposed link tolerance/priority/window 2387 * 2388 * @cmd: value type (TIPC_CMD_SET_LINK_*) 2389 * @new_value: the new value 2390 * 2391 * Returns 1 if value is within range, 0 if not. 2392 */ 2393 static int link_value_is_valid(u16 cmd, u32 new_value) 2394 { 2395 switch (cmd) { 2396 case TIPC_CMD_SET_LINK_TOL: 2397 return (new_value >= TIPC_MIN_LINK_TOL) && 2398 (new_value <= TIPC_MAX_LINK_TOL); 2399 case TIPC_CMD_SET_LINK_PRI: 2400 return (new_value <= TIPC_MAX_LINK_PRI); 2401 case TIPC_CMD_SET_LINK_WINDOW: 2402 return (new_value >= TIPC_MIN_LINK_WIN) && 2403 (new_value <= TIPC_MAX_LINK_WIN); 2404 } 2405 return 0; 2406 } 2407 2408 /** 2409 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media 2410 * @name: ptr to link, bearer, or media name 2411 * @new_value: new value of link, bearer, or media setting 2412 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*) 2413 * 2414 * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted. 2415 * 2416 * Returns 0 if value updated and negative value on error. 2417 */ 2418 static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd) 2419 { 2420 struct tipc_node *node; 2421 struct tipc_link *l_ptr; 2422 struct tipc_bearer *b_ptr; 2423 struct tipc_media *m_ptr; 2424 int res = 0; 2425 2426 l_ptr = link_find_link(name, &node); 2427 if (l_ptr) { 2428 /* 2429 * acquire node lock for tipc_link_send_proto_msg(). 
2430 * see "TIPC locking policy" in net.c. 2431 */ 2432 tipc_node_lock(node); 2433 switch (cmd) { 2434 case TIPC_CMD_SET_LINK_TOL: 2435 link_set_supervision_props(l_ptr, new_value); 2436 tipc_link_send_proto_msg(l_ptr, 2437 STATE_MSG, 0, 0, new_value, 0, 0); 2438 break; 2439 case TIPC_CMD_SET_LINK_PRI: 2440 l_ptr->priority = new_value; 2441 tipc_link_send_proto_msg(l_ptr, 2442 STATE_MSG, 0, 0, 0, new_value, 0); 2443 break; 2444 case TIPC_CMD_SET_LINK_WINDOW: 2445 tipc_link_set_queue_limits(l_ptr, new_value); 2446 break; 2447 default: 2448 res = -EINVAL; 2449 break; 2450 } 2451 tipc_node_unlock(node); 2452 return res; 2453 } 2454 2455 b_ptr = tipc_bearer_find(name); 2456 if (b_ptr) { 2457 switch (cmd) { 2458 case TIPC_CMD_SET_LINK_TOL: 2459 b_ptr->tolerance = new_value; 2460 break; 2461 case TIPC_CMD_SET_LINK_PRI: 2462 b_ptr->priority = new_value; 2463 break; 2464 case TIPC_CMD_SET_LINK_WINDOW: 2465 b_ptr->window = new_value; 2466 break; 2467 default: 2468 res = -EINVAL; 2469 break; 2470 } 2471 return res; 2472 } 2473 2474 m_ptr = tipc_media_find(name); 2475 if (!m_ptr) 2476 return -ENODEV; 2477 switch (cmd) { 2478 case TIPC_CMD_SET_LINK_TOL: 2479 m_ptr->tolerance = new_value; 2480 break; 2481 case TIPC_CMD_SET_LINK_PRI: 2482 m_ptr->priority = new_value; 2483 break; 2484 case TIPC_CMD_SET_LINK_WINDOW: 2485 m_ptr->window = new_value; 2486 break; 2487 default: 2488 res = -EINVAL; 2489 break; 2490 } 2491 return res; 2492 } 2493 2494 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, 2495 u16 cmd) 2496 { 2497 struct tipc_link_config *args; 2498 u32 new_value; 2499 int res; 2500 2501 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG)) 2502 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2503 2504 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area); 2505 new_value = ntohl(args->value); 2506 2507 if (!link_value_is_valid(cmd, new_value)) 2508 return tipc_cfg_reply_error_string( 2509 "cannot change, value invalid"); 
2510 2511 if (!strcmp(args->name, tipc_bclink_name)) { 2512 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) && 2513 (tipc_bclink_set_queue_limits(new_value) == 0)) 2514 return tipc_cfg_reply_none(); 2515 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 2516 " (cannot change setting on broadcast link)"); 2517 } 2518 2519 read_lock_bh(&tipc_net_lock); 2520 res = link_cmd_set_value(args->name, new_value, cmd); 2521 read_unlock_bh(&tipc_net_lock); 2522 if (res) 2523 return tipc_cfg_reply_error_string("cannot change link setting"); 2524 2525 return tipc_cfg_reply_none(); 2526 } 2527 2528 /** 2529 * link_reset_statistics - reset link statistics 2530 * @l_ptr: pointer to link 2531 */ 2532 static void link_reset_statistics(struct tipc_link *l_ptr) 2533 { 2534 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats)); 2535 l_ptr->stats.sent_info = l_ptr->next_out_no; 2536 l_ptr->stats.recv_info = l_ptr->next_in_no; 2537 } 2538 2539 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space) 2540 { 2541 char *link_name; 2542 struct tipc_link *l_ptr; 2543 struct tipc_node *node; 2544 2545 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2546 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2547 2548 link_name = (char *)TLV_DATA(req_tlv_area); 2549 if (!strcmp(link_name, tipc_bclink_name)) { 2550 if (tipc_bclink_reset_stats()) 2551 return tipc_cfg_reply_error_string("link not found"); 2552 return tipc_cfg_reply_none(); 2553 } 2554 2555 read_lock_bh(&tipc_net_lock); 2556 l_ptr = link_find_link(link_name, &node); 2557 if (!l_ptr) { 2558 read_unlock_bh(&tipc_net_lock); 2559 return tipc_cfg_reply_error_string("link not found"); 2560 } 2561 2562 tipc_node_lock(node); 2563 link_reset_statistics(l_ptr); 2564 tipc_node_unlock(node); 2565 read_unlock_bh(&tipc_net_lock); 2566 return tipc_cfg_reply_none(); 2567 } 2568 2569 /** 2570 * percent - convert count to a percentage of total (rounding up or down) 2571 */ 2572 static u32 percent(u32 count, u32 
total) 2573 { 2574 return (count * 100 + (total / 2)) / total; 2575 } 2576 2577 /** 2578 * tipc_link_stats - print link statistics 2579 * @name: link name 2580 * @buf: print buffer area 2581 * @buf_size: size of print buffer area 2582 * 2583 * Returns length of print buffer data string (or 0 if error) 2584 */ 2585 static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) 2586 { 2587 struct tipc_link *l; 2588 struct tipc_stats *s; 2589 struct tipc_node *node; 2590 char *status; 2591 u32 profile_total = 0; 2592 int ret; 2593 2594 if (!strcmp(name, tipc_bclink_name)) 2595 return tipc_bclink_stats(buf, buf_size); 2596 2597 read_lock_bh(&tipc_net_lock); 2598 l = link_find_link(name, &node); 2599 if (!l) { 2600 read_unlock_bh(&tipc_net_lock); 2601 return 0; 2602 } 2603 tipc_node_lock(node); 2604 s = &l->stats; 2605 2606 if (tipc_link_is_active(l)) 2607 status = "ACTIVE"; 2608 else if (tipc_link_is_up(l)) 2609 status = "STANDBY"; 2610 else 2611 status = "DEFUNCT"; 2612 2613 ret = tipc_snprintf(buf, buf_size, "Link <%s>\n" 2614 " %s MTU:%u Priority:%u Tolerance:%u ms" 2615 " Window:%u packets\n", 2616 l->name, status, l->max_pkt, l->priority, 2617 l->tolerance, l->queue_limit[0]); 2618 2619 ret += tipc_snprintf(buf + ret, buf_size - ret, 2620 " RX packets:%u fragments:%u/%u bundles:%u/%u\n", 2621 l->next_in_no - s->recv_info, s->recv_fragments, 2622 s->recv_fragmented, s->recv_bundles, 2623 s->recv_bundled); 2624 2625 ret += tipc_snprintf(buf + ret, buf_size - ret, 2626 " TX packets:%u fragments:%u/%u bundles:%u/%u\n", 2627 l->next_out_no - s->sent_info, s->sent_fragments, 2628 s->sent_fragmented, s->sent_bundles, 2629 s->sent_bundled); 2630 2631 profile_total = s->msg_length_counts; 2632 if (!profile_total) 2633 profile_total = 1; 2634 2635 ret += tipc_snprintf(buf + ret, buf_size - ret, 2636 " TX profile sample:%u packets average:%u octets\n" 2637 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% " 2638 "-16384:%u%% -32768:%u%% -66000:%u%%\n", 2639 
s->msg_length_counts, 2640 s->msg_lengths_total / profile_total, 2641 percent(s->msg_length_profile[0], profile_total), 2642 percent(s->msg_length_profile[1], profile_total), 2643 percent(s->msg_length_profile[2], profile_total), 2644 percent(s->msg_length_profile[3], profile_total), 2645 percent(s->msg_length_profile[4], profile_total), 2646 percent(s->msg_length_profile[5], profile_total), 2647 percent(s->msg_length_profile[6], profile_total)); 2648 2649 ret += tipc_snprintf(buf + ret, buf_size - ret, 2650 " RX states:%u probes:%u naks:%u defs:%u" 2651 " dups:%u\n", s->recv_states, s->recv_probes, 2652 s->recv_nacks, s->deferred_recv, s->duplicates); 2653 2654 ret += tipc_snprintf(buf + ret, buf_size - ret, 2655 " TX states:%u probes:%u naks:%u acks:%u" 2656 " dups:%u\n", s->sent_states, s->sent_probes, 2657 s->sent_nacks, s->sent_acks, s->retransmitted); 2658 2659 ret += tipc_snprintf(buf + ret, buf_size - ret, 2660 " Congestion link:%u Send queue" 2661 " max:%u avg:%u\n", s->link_congs, 2662 s->max_queue_sz, s->queue_sz_counts ? 
2663 (s->accu_queue_sz / s->queue_sz_counts) : 0); 2664 2665 tipc_node_unlock(node); 2666 read_unlock_bh(&tipc_net_lock); 2667 return ret; 2668 } 2669 2670 struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space) 2671 { 2672 struct sk_buff *buf; 2673 struct tlv_desc *rep_tlv; 2674 int str_len; 2675 int pb_len; 2676 char *pb; 2677 2678 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2679 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2680 2681 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN)); 2682 if (!buf) 2683 return NULL; 2684 2685 rep_tlv = (struct tlv_desc *)buf->data; 2686 pb = TLV_DATA(rep_tlv); 2687 pb_len = ULTRA_STRING_MAX_LEN; 2688 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area), 2689 pb, pb_len); 2690 if (!str_len) { 2691 kfree_skb(buf); 2692 return tipc_cfg_reply_error_string("link not found"); 2693 } 2694 str_len += 1; /* for "\0" */ 2695 skb_put(buf, TLV_SPACE(str_len)); 2696 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 2697 2698 return buf; 2699 } 2700 2701 /** 2702 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination 2703 * @dest: network address of destination node 2704 * @selector: used to select from set of active links 2705 * 2706 * If no active link can be found, uses default maximum packet size. 
2707 */ 2708 u32 tipc_link_get_max_pkt(u32 dest, u32 selector) 2709 { 2710 struct tipc_node *n_ptr; 2711 struct tipc_link *l_ptr; 2712 u32 res = MAX_PKT_DEFAULT; 2713 2714 if (dest == tipc_own_addr) 2715 return MAX_MSG_SIZE; 2716 2717 read_lock_bh(&tipc_net_lock); 2718 n_ptr = tipc_node_find(dest); 2719 if (n_ptr) { 2720 tipc_node_lock(n_ptr); 2721 l_ptr = n_ptr->active_links[selector & 1]; 2722 if (l_ptr) 2723 res = l_ptr->max_pkt; 2724 tipc_node_unlock(n_ptr); 2725 } 2726 read_unlock_bh(&tipc_net_lock); 2727 return res; 2728 } 2729 2730 static void link_print(struct tipc_link *l_ptr, const char *str) 2731 { 2732 pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name); 2733 2734 if (link_working_unknown(l_ptr)) 2735 pr_cont(":WU\n"); 2736 else if (link_reset_reset(l_ptr)) 2737 pr_cont(":RR\n"); 2738 else if (link_reset_unknown(l_ptr)) 2739 pr_cont(":RU\n"); 2740 else if (link_working_working(l_ptr)) 2741 pr_cont(":WW\n"); 2742 else 2743 pr_cont("\n"); 2744 } 2745