/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "port.h"
#include "name_distr.h"
#include "discover.h"
#include "config.h"

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";

/*
 * Out-of-range value for link session numbers
 */
#define INVALID_SESSION 0x10000

/*
 * Link state events:
 */
#define  STARTING_EVT    856384768	/* link processing trigger */
#define  TRAFFIC_MSG_EVT 560815u	/* rx'd traffic message */
#define  TIMEOUT_EVT     560817u	/* link timer expired */

/*
 * The following two 'message types' are really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol
 */
#define OPEN_MSG   0
#define CLOSED_MSG 1

/*
 * State value stored in 'exp_msg_count'
 */
#define START_CHANGEOVER 100000u

/**
 * struct tipc_link_name - deconstructed link name
 * @addr_local: network address of node at this end
 * @if_local: name of interface at this end
 * @addr_peer: network address of node at far end
 * @if_peer: name of interface at far end
 */
struct tipc_link_name {
	u32 addr_local;
	char if_local[TIPC_MAX_IF_NAME];
	u32 addr_peer;
	char if_peer[TIPC_MAX_IF_NAME];
};

static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf);
static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
static int  link_recv_changeover_msg(struct tipc_link **l_ptr,
				     struct sk_buff **buf);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
static int  link_send_sections_long(struct tipc_port *sender,
				    struct iovec const *msg_sect,
				    u32 num_sect, unsigned int total_len,
				    u32 destnode);
static void link_check_defragm_bufs(struct tipc_link *l_ptr);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void link_start(struct tipc_link *l_ptr);
static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);

/*
 *  Simple link routines
 */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}
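/*
 * Example: align() rounds a length up to the next 4-byte boundary, so
 * align(0) == 0, align(1) == 4, align(5) == 8 and align(8) == 8.
 * link_bundle_buf() below relies on this to place each bundled message
 * at a word-aligned offset inside the bundle.
 */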
static void link_init_max_pkt(struct tipc_link *l_ptr)
{
	u32 max_pkt;

	max_pkt = (l_ptr->b_ptr->mtu & ~3);
	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	l_ptr->max_pkt_probes = 0;
}

static u32 link_next_sent(struct tipc_link *l_ptr)
{
	if (l_ptr->next_out)
		return buf_seqno(l_ptr->next_out);
	return mod(l_ptr->next_out_no);
}

static u32 link_last_sent(struct tipc_link *l_ptr)
{
	return mod(link_next_sent(l_ptr) - 1);
}

/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l_ptr)
{
	return	(l_ptr->owner->active_links[0] == l_ptr) ||
		(l_ptr->owner->active_links[1] == l_ptr);
}

/**
 * link_name_validate - validate & (optionally) deconstruct tipc_link name
 * @name: ptr to link name string
 * @name_parts: ptr to area for link name components (or NULL if not needed)
 *
 * Returns 1 if link name is valid, otherwise 0.
 */
static int link_name_validate(const char *name,
			      struct tipc_link_name *name_parts)
{
	char name_copy[TIPC_MAX_LINK_NAME];
	char *addr_local;
	char *if_local;
	char *addr_peer;
	char *if_peer;
	char dummy;
	u32 z_local, c_local, n_local;
	u32 z_peer, c_peer, n_peer;
	u32 if_local_len;
	u32 if_peer_len;

	/* copy link name & ensure length is OK */
	name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
	/* need above in case non-Posix strncpy() doesn't pad with nulls */
	strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
	if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
		return 0;

	/* ensure all component parts of link name are present */
	addr_local = name_copy;
	if_local = strchr(addr_local, ':');
	if (if_local == NULL)
		return 0;
	*(if_local++) = 0;
	addr_peer = strchr(if_local, '-');
	if (addr_peer == NULL)
		return 0;
	*(addr_peer++) = 0;
	if_local_len = addr_peer - if_local;
	if_peer = strchr(addr_peer, ':');
	if (if_peer == NULL)
		return 0;
	*(if_peer++) = 0;
	if_peer_len = strlen(if_peer) + 1;

	/* validate component parts of link name */
	if ((sscanf(addr_local, "%u.%u.%u%c",
		    &z_local, &c_local, &n_local, &dummy) != 3) ||
	    (sscanf(addr_peer, "%u.%u.%u%c",
		    &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
	    (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
	    (z_peer > 255) || (c_peer > 4095) || (n_peer > 4095) ||
	    (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
	    (if_peer_len <= 1) || (if_peer_len > TIPC_MAX_IF_NAME) ||
	    (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
	    (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
		return 0;

	/* return link name components, if necessary */
	if (name_parts) {
		name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
		strcpy(name_parts->if_local, if_local);
		name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
		strcpy(name_parts->if_peer, if_peer);
	}
	return 1;
}
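/*
 * For illustration, a link name that passes link_name_validate() has the
 * form "<zone>.<cluster>.<node>:<local if>-<zone>.<cluster>.<node>:<peer if>",
 * e.g. "1.1.1:eth0-1.1.2:eth0" (the interface names here are only examples;
 * they depend on the bearers actually configured).
 */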
/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 *
 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
 * with tipc_link_delete().  (There is no risk that the node will be deleted by
 * another thread because tipc_link_delete() always cancels the link timer before
 * tipc_node_delete() is called.)
 */
static void link_timeout(struct tipc_link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
	l_ptr->stats.queue_sz_counts++;

	if (l_ptr->first_out) {
		struct tipc_msg *msg = buf_msg(l_ptr->first_out);
		u32 length = msg_size(msg);

		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */
	link_check_defragm_bufs(l_ptr);

	link_state_event(l_ptr, TIMEOUT_EVT);

	if (l_ptr->next_out)
		tipc_link_push_queue(l_ptr);

	tipc_node_unlock(l_ptr->owner);
}

static void link_set_timer(struct tipc_link *l_ptr, u32 time)
{
	k_start_timer(&l_ptr->timer, time);
}

/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr)
{
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= 2) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish third link to %s\n", addr_string);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}

	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
		tipc_node(tipc_own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
		/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->peer_session = INVALID_SESSION;
	l_ptr->b_ptr = b_ptr;
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tipc_random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);

	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);

	link_init_max_pkt(l_ptr);

	l_ptr->next_out_no = 1;
	INIT_LIST_HEAD(&l_ptr->waiting_ports);

	link_reset_statistics(l_ptr);

	tipc_node_attach_link(n_ptr, l_ptr);

	k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
	list_add_tail(&l_ptr->link_list, &b_ptr->links);
	tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);

	return l_ptr;
}

/**
 * tipc_link_delete - delete a link
 * @l_ptr: pointer to link
 *
 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
 * This routine must not grab the node lock until after link timer cancellation
 * to avoid a potential deadlock situation.
 */
void tipc_link_delete(struct tipc_link *l_ptr)
{
	if (!l_ptr) {
		pr_err("Attempt to delete non-existent link\n");
		return;
	}

	k_cancel_timer(&l_ptr->timer);

	tipc_node_lock(l_ptr->owner);
	tipc_link_reset(l_ptr);
	tipc_node_detach_link(l_ptr->owner, l_ptr);
	tipc_link_stop(l_ptr);
	list_del_init(&l_ptr->link_list);
	tipc_node_unlock(l_ptr->owner);
	k_term_timer(&l_ptr->timer);
	kfree(l_ptr);
}

static void link_start(struct tipc_link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);
	link_state_event(l_ptr, STARTING_EVT);
	tipc_node_unlock(l_ptr->owner);
}

/**
 * link_schedule_port - schedule port for deferred sending
 * @l_ptr: pointer to link
 * @origport: reference to sending port
 * @sz: amount of data to be sent
 *
 * Schedules port for renewed sending of messages after link congestion
 * has abated.
 */
static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
{
	struct tipc_port *p_ptr;

	spin_lock_bh(&tipc_port_list_lock);
	p_ptr = tipc_port_lock(origport);
	if (p_ptr) {
		if (!p_ptr->wakeup)
			goto exit;
		if (!list_empty(&p_ptr->wait_list))
			goto exit;
		p_ptr->congested = 1;
		p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
		list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
		l_ptr->stats.link_congs++;
exit:
		tipc_port_unlock(p_ptr);
	}
	spin_unlock_bh(&tipc_port_list_lock);
	return -ELINKCONG;
}

void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
{
	struct tipc_port *p_ptr;
	struct tipc_port *temp_p_ptr;
	int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;

	if (all)
		win = 100000;
	if (win <= 0)
		return;
	if (!spin_trylock_bh(&tipc_port_list_lock))
		return;
	if (link_congested(l_ptr))
		goto exit;
	list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
				 wait_list) {
		if (win <= 0)
			break;
		list_del_init(&p_ptr->wait_list);
		spin_lock_bh(p_ptr->lock);
		p_ptr->congested = 0;
		p_ptr->wakeup(p_ptr);
		win -= p_ptr->waiting_pkts;
		spin_unlock_bh(p_ptr->lock);
	}

exit:
	spin_unlock_bh(&tipc_port_list_lock);
}

/**
 * link_release_outqueue - purge link's outbound message queue
 * @l_ptr: pointer to link
 */
static void link_release_outqueue(struct tipc_link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	struct sk_buff *next;

	while (buf) {
		next = buf->next;
		kfree_skb(buf);
		buf = next;
	}
	l_ptr->first_out = NULL;
	l_ptr->out_queue_size = 0;
}

/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	struct sk_buff *buf = l_ptr->defragm_buf;
	struct sk_buff *next;

	while (buf) {
		next = buf->next;
		kfree_skb(buf);
		buf = next;
	}
	l_ptr->defragm_buf = NULL;
}

/**
 * tipc_link_stop - purge all inbound and outbound messages associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_stop(struct tipc_link *l_ptr)
{
	struct sk_buff *buf;
	struct sk_buff *next;

	buf = l_ptr->oldest_deferred_in;
	while (buf) {
		next = buf->next;
		kfree_skb(buf);
		buf = next;
	}

	buf = l_ptr->first_out;
	while (buf) {
		next = buf->next;
		kfree_skb(buf);
		buf = next;
	}

	tipc_link_reset_fragments(l_ptr);

	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
}

void tipc_link_reset(struct tipc_link *l_ptr)
{
	struct sk_buff *buf;
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);

	if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
	    l_ptr->owner->permit_changeover) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues: */
	link_release_outqueue(l_ptr);
	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
	buf = l_ptr->oldest_deferred_in;
	while (buf) {
		struct sk_buff *next = buf->next;
		kfree_skb(buf);
		buf = next;
	}
	if (!list_empty(&l_ptr->waiting_ports))
		tipc_link_wakeup_ports(l_ptr, 1);

	l_ptr->retransm_queue_head = 0;
	l_ptr->retransm_queue_size = 0;
	l_ptr->last_out = NULL;
	l_ptr->first_out = NULL;
	l_ptr->next_out = NULL;
	l_ptr->unacked_window = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->deferred_inqueue_sz = 0;
	l_ptr->oldest_deferred_in = NULL;
	l_ptr->newest_deferred_in = NULL;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}


static void link_activate(struct tipc_link *l_ptr)
{
	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
	tipc_node_link_up(l_ptr->owner, l_ptr);
	tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
}
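/*
 * Summary of the state machine implemented by link_state_event() below:
 *
 * WORKING_WORKING: traffic is flowing; a timeout with no new traffic
 *                  sends a probe and moves the link to WORKING_UNKNOWN.
 * WORKING_UNKNOWN: probing; any traffic restores WORKING_WORKING, while
 *                  'abort_limit' unanswered probes reset the link.
 * RESET_UNKNOWN:   link is down; RESET_MSG is sent on each timeout until
 *                  the peer responds.
 * RESET_RESET:     the peer's reset has been seen; ACTIVATE_MSG is sent
 *                  until traffic or an activate brings the link up again.
 */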
/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 */
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
	struct tipc_link *other;
	u32 cont_intv = l_ptr->continuity_interval;

	if (!l_ptr->started && (event != STARTING_EVT))
		return;	/* Not yet. */

	if (link_blocked(l_ptr)) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;	/* Changeover going on */
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer\n", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in WW state\n", link_unk_evt, event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer while probing\n",
				link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				tipc_link_send_proto_msg(l_ptr, STATE_MSG,
							 1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				pr_warn("%s<%s>, peer not responding\n",
					link_rst_msg, l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_send_proto_msg(l_ptr, RESET_MSG,
							 0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			pr_err("%s%u in WU state\n", link_unk_evt, event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->started = 1;
			/* fall through */
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RU state\n", link_unk_evt, event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RR state\n", link_unk_evt, event);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}
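/*
 * Bundling example (illustrative): a bundle currently holding 70 bytes
 * has its next message placed at offset align(70) == 72, i.e. two bytes
 * of padding are inserted so every bundled message starts word-aligned.
 */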
/*
 * link_bundle_buf(): Append contents of a buffer to
 * the tail of an existing one.
 */
static int link_bundle_buf(struct tipc_link *l_ptr,
			   struct sk_buff *bundler,
			   struct sk_buff *buf)
{
	struct tipc_msg *bundler_msg = buf_msg(bundler);
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 bundle_size = msg_size(bundler_msg);
	u32 to_pos = align(bundle_size);
	u32 pad = to_pos - bundle_size;

	if (msg_user(bundler_msg) != MSG_BUNDLER)
		return 0;
	if (msg_type(bundler_msg) != OPEN_MSG)
		return 0;
	if (skb_tailroom(bundler) < (pad + size))
		return 0;
	if (l_ptr->max_pkt < (to_pos + size))
		return 0;

	skb_put(bundler, pad + size);
	skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
	msg_set_size(bundler_msg, to_pos + size);
	msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
	kfree_skb(buf);
	l_ptr->stats.sent_bundled++;
	return 1;
}

static void link_add_to_outqueue(struct tipc_link *l_ptr,
				 struct sk_buff *buf,
				 struct tipc_msg *msg)
{
	u32 ack = mod(l_ptr->next_in_no - 1);
	u32 seqno = mod(l_ptr->next_out_no++);

	msg_set_word(msg, 2, ((ack << 16) | seqno));
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	buf->next = NULL;
	if (l_ptr->first_out) {
		l_ptr->last_out->next = buf;
		l_ptr->last_out = buf;
	} else
		l_ptr->first_out = l_ptr->last_out = buf;

	l_ptr->out_queue_size++;
	if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
		l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
}

static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
				       struct sk_buff *buf_chain,
				       u32 long_msgno)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	if (!l_ptr->next_out)
		l_ptr->next_out = buf_chain;
	while (buf_chain) {
		buf = buf_chain;
		buf_chain = buf_chain->next;

		msg = buf_msg(buf);
		msg_set_long_msgno(msg, long_msgno);
		link_add_to_outqueue(l_ptr, buf, msg);
	}
}

/*
 * tipc_link_send_buf() is the 'full path' for messages, called from
 * inside TIPC when the 'fast path' in tipc_send_buf
 * has failed, and from link_send()
 */
int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 dsz = msg_data_sz(msg);
	u32 queue_size = l_ptr->out_queue_size;
	u32 imp = tipc_msg_tot_importance(msg);
	u32 queue_limit = l_ptr->queue_limit[imp];
	u32 max_packet = l_ptr->max_pkt;

	/* Match msg importance against queue limits: */
	if (unlikely(queue_size >= queue_limit)) {
		if (imp <= TIPC_CRITICAL_IMPORTANCE) {
			link_schedule_port(l_ptr, msg_origport(msg), size);
			kfree_skb(buf);
			return -ELINKCONG;
		}
		kfree_skb(buf);
		if (imp > CONN_MANAGER) {
			pr_warn("%s<%s>, send queue full", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
		}
		return dsz;
	}

	/* Fragmentation needed ? */
	if (size > max_packet)
		return link_send_long_buf(l_ptr, buf);

	/* Packet can be queued or sent. */
	if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
		   !link_congested(l_ptr))) {
		link_add_to_outqueue(l_ptr, buf, msg);

		if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
			l_ptr->unacked_window = 0;
		} else {
			tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
			l_ptr->stats.bearer_congs++;
			l_ptr->next_out = buf;
		}
		return dsz;
	}
	/* Congestion: can message be bundled ? */
	if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
	    (msg_user(msg) != MSG_FRAGMENTER)) {

		/* Try adding message to an existing bundle */
		if (l_ptr->next_out &&
		    link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
			tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
			return dsz;
		}

		/* Try creating a new bundle */
		if (size <= max_packet * 2 / 3) {
			struct sk_buff *bundler = tipc_buf_acquire(max_packet);
			struct tipc_msg bundler_hdr;

			if (bundler) {
				tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
					      INT_H_SIZE, l_ptr->addr);
				skb_copy_to_linear_data(bundler, &bundler_hdr,
							INT_H_SIZE);
				skb_trim(bundler, INT_H_SIZE);
				link_bundle_buf(l_ptr, bundler, buf);
				buf = bundler;
				msg = buf_msg(buf);
				l_ptr->stats.sent_bundles++;
			}
		}
	}
	if (!l_ptr->next_out)
		l_ptr->next_out = buf;
	link_add_to_outqueue(l_ptr, buf, msg);
	tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
	return dsz;
}

/*
 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
 * not been selected yet, and the owner node is not locked.
 * Called by TIPC internal users, e.g. the name distributor
 */
int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	int res = -ELINKCONG;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector & 1];
		if (l_ptr)
			res = tipc_link_send_buf(l_ptr, buf);
		else
			kfree_skb(buf);
		tipc_node_unlock(n_ptr);
	} else {
		kfree_skb(buf);
	}
	read_unlock_bh(&tipc_net_lock);
	return res;
}

/**
 * tipc_link_send_names - send name table entries to new neighbor
 * @message_list: list of buffers holding name table messages
 * @dest: network address of the new neighbor
 *
 * Send routine for bulk delivery of name table messages when contact
 * with a new neighbor occurs. No link congestion checking is performed
 * because name table messages *must* be delivered. The messages must be
 * small enough not to require fragmentation.
 * Called without any locks held.
 */
void tipc_link_send_names(struct list_head *message_list, u32 dest)
{
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	struct sk_buff *buf;
	struct sk_buff *temp_buf;

	if (list_empty(message_list))
		return;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[0];
		if (l_ptr) {
			/* convert circular list to linear list */
			((struct sk_buff *)message_list->prev)->next = NULL;
			link_add_chain_to_outqueue(l_ptr,
				(struct sk_buff *)message_list->next, 0);
			tipc_link_push_queue(l_ptr);
			INIT_LIST_HEAD(message_list);
		}
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);

	/* discard the messages if they couldn't be sent */
	list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
		list_del((struct list_head *)buf);
		kfree_skb(buf);
	}
}

/*
 * link_send_buf_fast: Entry for data messages where the
 * destination link is known and the header is complete,
 * including total message length. Very time critical.
 * Link is locked. Returns user data length.
 */
static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
			      u32 *used_max_pkt)
{
	struct tipc_msg *msg = buf_msg(buf);
	int res = msg_data_sz(msg);

	if (likely(!link_congested(l_ptr))) {
		if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
			if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
				link_add_to_outqueue(l_ptr, buf, msg);
				if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
							    &l_ptr->media_addr))) {
					l_ptr->unacked_window = 0;
					return res;
				}
				tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
				l_ptr->stats.bearer_congs++;
				l_ptr->next_out = buf;
				return res;
			}
		} else
			*used_max_pkt = l_ptr->max_pkt;
	}
	return tipc_link_send_buf(l_ptr, buf);  /* All other cases */
}

/*
 * tipc_send_buf_fast: Entry for data messages where the
 * destination node is known and the header is complete,
 * including total message length.
 * Returns user data length.
 */
int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	int res;
	u32 selector = msg_origport(buf_msg(buf)) & 1;
	u32 dummy;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(destnode);
	if (likely(n_ptr)) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector];
		if (likely(l_ptr)) {
			res = link_send_buf_fast(l_ptr, buf, &dummy);
			tipc_node_unlock(n_ptr);
			read_unlock_bh(&tipc_net_lock);
			return res;
		}
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);
	res = msg_data_sz(buf_msg(buf));
	tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
	return res;
}


/*
 * tipc_link_send_sections_fast: Entry for messages where the
 * destination processor is known and the header is complete,
 * except for total message length.
 * Returns user data length or errno.
 */
int tipc_link_send_sections_fast(struct tipc_port *sender,
				 struct iovec const *msg_sect,
				 const u32 num_sect,
				 unsigned int total_len,
				 u32 destaddr)
{
	struct tipc_msg *hdr = &sender->phdr;
	struct tipc_link *l_ptr;
	struct sk_buff *buf;
	struct tipc_node *node;
	int res;
	u32 selector = msg_origport(hdr) & 1;

again:
	/*
	 * Try building message using port's max_pkt hint.
	 * (Must not hold any locks while building message.)
	 */
	res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
			     sender->max_pkt, !sender->user_port, &buf);

	read_lock_bh(&tipc_net_lock);
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[selector];
		if (likely(l_ptr)) {
			if (likely(buf)) {
				res = link_send_buf_fast(l_ptr, buf,
							 &sender->max_pkt);
exit:
				tipc_node_unlock(node);
				read_unlock_bh(&tipc_net_lock);
				return res;
			}

			/* Exit if build request was invalid */
			if (unlikely(res < 0))
				goto exit;

			/* Exit if link (or bearer) is congested */
			if (link_congested(l_ptr) ||
			    !list_empty(&l_ptr->b_ptr->cong_links)) {
				res = link_schedule_port(l_ptr,
							 sender->ref, res);
				goto exit;
			}

			/*
			 * Message size exceeds max_pkt hint; update hint,
			 * then re-try fast path or fragment the message
			 */
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			read_unlock_bh(&tipc_net_lock);


			if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
				goto again;

			return link_send_sections_long(sender, msg_sect,
						       num_sect, total_len,
						       destaddr);
		}
		tipc_node_unlock(node);
	}
	read_unlock_bh(&tipc_net_lock);

	/* Couldn't find a link to the destination node */
	if (buf)
		return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
	if (res >= 0)
		return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
						 total_len, TIPC_ERR_NO_NODE);
	return res;
}
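/*
 * Sizing sketch for link_send_sections_long() below: room is reserved
 * twice per fragment -- max_pkt = sender->max_pkt - INT_H_SIZE leaves
 * space for a possible changeover tunnel header, and fragm_sz =
 * max_pkt - INT_H_SIZE leaves space for the fragmentation header itself.
 */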
/*
 * link_send_sections_long(): Entry for long messages where the
 * destination node is known and the header is complete,
 * including total message length.
 * Link and bearer congestion status have been checked to be ok,
 * and are ignored if they change.
 *
 * Note that fragments do not use the full link MTU so that they won't have
 * to undergo refragmentation if link changeover causes them to be sent
 * over another link with an additional tunnel header added as prefix.
 * (Refragmentation will still occur if the other link has a smaller MTU.)
 *
 * Returns user data length or errno.
 */
static int link_send_sections_long(struct tipc_port *sender,
				   struct iovec const *msg_sect,
				   u32 num_sect,
				   unsigned int total_len,
				   u32 destaddr)
{
	struct tipc_link *l_ptr;
	struct tipc_node *node;
	struct tipc_msg *hdr = &sender->phdr;
	u32 dsz = total_len;
	u32 max_pkt, fragm_sz, rest;
	struct tipc_msg fragm_hdr;
	struct sk_buff *buf, *buf_chain, *prev;
	u32 fragm_crs, fragm_rest, hsz, sect_rest;
	const unchar *sect_crs;
	int curr_sect;
	u32 fragm_no;

again:
	fragm_no = 1;
	max_pkt = sender->max_pkt - INT_H_SIZE;
		/* leave room for tunnel header in case of link changeover */
	fragm_sz = max_pkt - INT_H_SIZE;
		/* leave room for fragmentation header in each fragment */
	rest = dsz;
	fragm_crs = 0;
	fragm_rest = 0;
	sect_rest = 0;
	sect_crs = NULL;
	curr_sect = -1;

	/* Prepare reusable fragment header */
	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, msg_destnode(hdr));
	msg_set_size(&fragm_hdr, max_pkt);
	msg_set_fragm_no(&fragm_hdr, 1);

	/* Prepare header of first fragment */
	buf_chain = buf = tipc_buf_acquire(max_pkt);
	if (!buf)
		return -ENOMEM;
	buf->next = NULL;
	skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
	hsz = msg_hdr_sz(hdr);
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);

	/* Chop up message */
	fragm_crs = INT_H_SIZE + hsz;
	fragm_rest = fragm_sz - hsz;

	do {		/* For all sections */
		u32 sz;

		if (!sect_rest) {
			sect_rest = msg_sect[++curr_sect].iov_len;
			sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
		}

		if (sect_rest < fragm_rest)
			sz = sect_rest;
		else
			sz = fragm_rest;

		if (likely(!sender->user_port)) {
			if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
error:
				for (; buf_chain; buf_chain = buf) {
					buf = buf_chain->next;
					kfree_skb(buf_chain);
				}
				return -EFAULT;
			}
		} else
			skb_copy_to_linear_data_offset(buf, fragm_crs,
						       sect_crs, sz);
		sect_crs += sz;
		sect_rest -= sz;
		fragm_crs += sz;
		fragm_rest -= sz;
		rest -= sz;

		if (!fragm_rest && rest) {

			/* Initiate new fragment: */
			if (rest <= fragm_sz) {
				fragm_sz = rest;
				msg_set_type(&fragm_hdr, LAST_FRAGMENT);
			} else {
				msg_set_type(&fragm_hdr, FRAGMENT);
			}
			msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
			msg_set_fragm_no(&fragm_hdr, ++fragm_no);
			prev = buf;
			buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
			if (!buf)
				goto error;

			buf->next = NULL;
			prev->next = buf;
			skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
			fragm_crs = INT_H_SIZE;
			fragm_rest = fragm_sz;
		}
	} while (rest > 0);

	/*
	 * Now we have a buffer chain. Select a link and check
	 * that packet size is still OK
	 */
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[sender->ref & 1];
		if (!l_ptr) {
			tipc_node_unlock(node);
			goto reject;
		}
		if (l_ptr->max_pkt < max_pkt) {
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			for (; buf_chain; buf_chain = buf) {
				buf = buf_chain->next;
				kfree_skb(buf_chain);
			}
			goto again;
		}
	} else {
reject:
		for (; buf_chain; buf_chain = buf) {
			buf = buf_chain->next;
			kfree_skb(buf_chain);
		}
		return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
						 total_len, TIPC_ERR_NO_NODE);
	}

	/* Append chain of fragments to send queue & send them */
	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);
	tipc_node_unlock(node);
	return dsz;
}
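/*
 * Push-path note: tipc_link_push_packet() sends at most one pending packet
 * per call, trying the retransmission queue first, then any deferred
 * protocol message, then the first unsent data message. It returns 0 on
 * progress, PUSH_FAILED on bearer congestion and PUSH_FINISHED when nothing
 * more may be sent, which is what lets tipc_link_push_queue() loop until
 * the backlog drains.
 */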
/*
 * tipc_link_push_packet: Push one unsent packet to the media
 */
u32 tipc_link_push_packet(struct tipc_link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	u32 r_q_size = l_ptr->retransm_queue_size;
	u32 r_q_head = l_ptr->retransm_queue_head;

	/* Step to position where retransmission failed, if any,    */
	/* consider that buffers may have been released in meantime */
	if (r_q_size && buf) {
		u32 last = lesser(mod(r_q_head + r_q_size),
				  link_last_sent(l_ptr));
		u32 first = buf_seqno(buf);

		while (buf && less(first, r_q_head)) {
			first = mod(first + 1);
			buf = buf->next;
		}
		l_ptr->retransm_queue_head = r_q_head = first;
		l_ptr->retransm_queue_size = r_q_size = mod(last - first);
	}

	/* Continue retransmission now, if there is anything: */
	if (r_q_size && buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
			l_ptr->retransm_queue_head = mod(++r_q_head);
			l_ptr->retransm_queue_size = --r_q_size;
			l_ptr->stats.retransmitted++;
			return 0;
		} else {
			l_ptr->stats.bearer_congs++;
			return PUSH_FAILED;
		}
	}

	/* Send deferred protocol message, if any: */
	buf = l_ptr->proto_msg_queue;
	if (buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
			l_ptr->unacked_window = 0;
			kfree_skb(buf);
			l_ptr->proto_msg_queue = NULL;
			return 0;
		} else {
			l_ptr->stats.bearer_congs++;
			return PUSH_FAILED;
		}
	}

	/* Send one deferred data message, if send window not full: */
	buf = l_ptr->next_out;
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		u32 next = msg_seqno(msg);
		u32 first = buf_seqno(l_ptr->first_out);

		if (mod(next - first) < l_ptr->queue_limit[0]) {
			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
			if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
				if (msg_user(msg) == MSG_BUNDLER)
					msg_set_type(msg, CLOSED_MSG);
				l_ptr->next_out = buf->next;
				return 0;
			} else {
				l_ptr->stats.bearer_congs++;
				return PUSH_FAILED;
			}
		}
	}
	return PUSH_FINISHED;
}

/*
 * push_queue(): push out the unsent messages of a link where
 *               congestion has abated. Node is locked
 */
void tipc_link_push_queue(struct tipc_link *l_ptr)
{
	u32 res;

	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
		return;

	do {
		res = tipc_link_push_packet(l_ptr);
	} while (!res);

	if (res == PUSH_FAILED)
		tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
}

static void link_reset_all(unsigned long addr)
{
	struct tipc_node *n_ptr;
	char addr_string[16];
	u32 i;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find((u32)addr);
	if (!n_ptr) {
		read_unlock_bh(&tipc_net_lock);
		return;	/* node no longer exists */
	}

	tipc_node_lock(n_ptr);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, n_ptr->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (n_ptr->links[i]) {
			link_print(n_ptr->links[i], "Resetting link\n");
			tipc_link_reset(n_ptr->links[i]);
		}
	}

	tipc_node_unlock(n_ptr);
	read_unlock_bh(&tipc_net_lock);
}

static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);

	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u, ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to();
		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Supportable: %d, Supported: %d, Acked: %u\n",
			n_ptr->bclink.supportable,
			n_ptr->bclink.supported,
			n_ptr->bclink.acked);
		pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);

		tipc_node_unlock(n_ptr);

		l_ptr->stale_count = 0;
	}
}

void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!buf)
		return;

	msg = buf_msg(buf);

	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
		if (l_ptr->retransm_queue_size == 0) {
			l_ptr->retransm_queue_head = msg_seqno(msg);
			l_ptr->retransm_queue_size = retransmits;
		} else {
			pr_err("Unexpected retransmit on link %s (qsize=%d)\n",
			       l_ptr->name, l_ptr->retransm_queue_size);
		}
		return;
	} else {
		/* Detect repeated retransmit failures on uncongested bearer */
		if (l_ptr->last_retransmitted == msg_seqno(msg)) {
			if (++l_ptr->stale_count > 100) {
				link_retransmit_failure(l_ptr, buf);
				return;
			}
		} else {
			l_ptr->last_retransmitted = msg_seqno(msg);
			l_ptr->stale_count = 1;
		}
	}

	while (retransmits && (buf != l_ptr->next_out) && buf) {
		msg = buf_msg(buf);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
			buf = buf->next;
			retransmits--;
			l_ptr->stats.retransmitted++;
		} else {
			tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
			l_ptr->stats.bearer_congs++;
			l_ptr->retransm_queue_head = buf_seqno(buf);
			l_ptr->retransm_queue_size = retransmits;
			return;
		}
	}

	l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
}

/**
 * link_insert_deferred_queue - insert deferred messages back into receive chain
 */
static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
						  struct sk_buff *buf)
{
	u32 seq_no;

	if (l_ptr->oldest_deferred_in == NULL)
		return buf;

	seq_no = buf_seqno(l_ptr->oldest_deferred_in);
	if (seq_no == mod(l_ptr->next_in_no)) {
		l_ptr->newest_deferred_in->next = buf;
		buf = l_ptr->oldest_deferred_in;
		l_ptr->oldest_deferred_in = NULL;
		l_ptr->deferred_inqueue_sz = 0;
	}
	return buf;
}

/**
 * link_recv_buf_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should.  The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
static int link_recv_buf_validate(struct sk_buff *buf)
{
	static u32 min_data_hdr_size[8] = {
		SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
		MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
	};

	struct tipc_msg *msg;
	u32 tipc_hdr[2];
	u32 size;
	u32 hdr_size;
	u32 min_hdr_size;

	if (unlikely(buf->len < MIN_H_SIZE))
		return 0;

	msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
	if (msg == NULL)
		return 0;

	if (unlikely(msg_version(msg) != TIPC_VERSION))
		return 0;

	size = msg_size(msg);
	hdr_size = msg_hdr_sz(msg);
	min_hdr_size = msg_isdata(msg) ?
		min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;

	if (unlikely((hdr_size < min_hdr_size) ||
		     (size < hdr_size) ||
		     (buf->len < size) ||
		     (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
		return 0;

	return pskb_may_pull(buf, hdr_size);
}
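/*
 * Reception overview: tipc_recv_msg() below validates each arriving buffer,
 * releases acknowledged packets from the send queue, then either delivers
 * in-sequence messages (unbundling, defragmenting or undoing changeover
 * tunneling as needed) or defers out-of-sequence ones via
 * link_handle_out_of_seq_msg().
 */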
/**
 * tipc_recv_msg - process TIPC messages arriving from off-node
 * @head: pointer to message buffer chain
 * @b_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held.  Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
{
	read_lock_bh(&tipc_net_lock);
	while (head) {
		struct tipc_node *n_ptr;
		struct tipc_link *l_ptr;
		struct sk_buff *crs;
		struct sk_buff *buf = head;
		struct tipc_msg *msg;
		u32 seq_no;
		u32 ackd;
		u32 released = 0;
		int type;

		head = head->next;

		/* Ensure bearer is still enabled */
		if (unlikely(!b_ptr->active))
			goto cont;

		/* Ensure message is well-formed */
		if (unlikely(!link_recv_buf_validate(buf)))
			goto cont;

		/* Ensure message data is a single contiguous unit */
		if (unlikely(skb_linearize(buf)))
			goto cont;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(buf);

		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) == LINK_CONFIG)
				tipc_disc_recv_msg(buf, b_ptr);
			else
				tipc_bclink_recv_pkt(buf);
			continue;
		}

		/* Discard unicast link messages destined for another node */
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tipc_own_addr)))
			goto cont;

		/* Locate neighboring node that sent message */
		n_ptr = tipc_node_find(msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto cont;
		tipc_node_lock(n_ptr);

		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr)) {
			tipc_node_unlock(n_ptr);
			goto cont;
		}

		/* Verify that communication with node is currently allowed */
		if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
		    msg_user(msg) == LINK_PROTOCOL &&
		    (msg_type(msg) == RESET_MSG ||
		     msg_type(msg) == ACTIVATE_MSG) &&
		    !msg_redundant_link(msg))
			n_ptr->block_setup &= ~WAIT_PEER_DOWN;

		if (n_ptr->block_setup) {
			tipc_node_unlock(n_ptr);
			goto cont;
		}

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
		if (n_ptr->bclink.supported)
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		crs = l_ptr->first_out;
		while ((crs != l_ptr->next_out) &&
		       less_eq(buf_seqno(crs), ackd)) {
			struct sk_buff *next = crs->next;

			kfree_skb(crs);
			crs = next;
			released++;
		}
		if (released) {
			l_ptr->first_out = crs;
			l_ptr->out_queue_size -= released;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(l_ptr->next_out))
			tipc_link_push_queue(l_ptr);
		if (unlikely(!list_empty(&l_ptr->waiting_ports)))
			tipc_link_wakeup_ports(l_ptr, 0);
		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}

		/* Now (finally!) process the incoming message */
protocol_check:
		if (likely(link_working_working(l_ptr))) {
			if (likely(seq_no == mod(l_ptr->next_in_no))) {
				l_ptr->next_in_no++;
				if (unlikely(l_ptr->oldest_deferred_in))
					head = link_insert_deferred_queue(l_ptr,
									  head);
deliver:
				if (likely(msg_isdata(msg))) {
					tipc_node_unlock(n_ptr);
					tipc_port_recv_msg(buf);
					continue;
				}
				switch (msg_user(msg)) {
					int ret;
				case MSG_BUNDLER:
					l_ptr->stats.recv_bundles++;
					l_ptr->stats.recv_bundled +=
						msg_msgcnt(msg);
					tipc_node_unlock(n_ptr);
					tipc_link_recv_bundle(buf);
					continue;
				case NAME_DISTRIBUTOR:
					tipc_node_unlock(n_ptr);
					tipc_named_recv(buf);
					continue;
				case CONN_MANAGER:
					tipc_node_unlock(n_ptr);
					tipc_port_recv_proto_msg(buf);
					continue;
				case MSG_FRAGMENTER:
					l_ptr->stats.recv_fragments++;
					ret = tipc_link_recv_fragment(
						&l_ptr->defragm_buf,
						&buf, &msg);
					if (ret == 1) {
						l_ptr->stats.recv_fragmented++;
						goto deliver;
					}
					if (ret == -1)
						l_ptr->next_in_no--;
					break;
				case CHANGEOVER_PROTOCOL:
					type = msg_type(msg);
					if (link_recv_changeover_msg(&l_ptr,
								     &buf)) {
						msg = buf_msg(buf);
						seq_no = msg_seqno(msg);
						if (type == ORIGINAL_MSG)
							goto deliver;
						goto protocol_check;
					}
					break;
				default:
					kfree_skb(buf);
					buf = NULL;
					break;
				}
				tipc_node_unlock(n_ptr);
				tipc_net_route_msg(buf);
				continue;
			}
			link_handle_out_of_seq_msg(l_ptr, buf);
			head = link_insert_deferred_queue(l_ptr, head);
			tipc_node_unlock(n_ptr);
			continue;
		}

		if (msg_user(msg) == LINK_PROTOCOL) {
			link_recv_proto_msg(l_ptr, buf);
			head = link_insert_deferred_queue(l_ptr, head);
			tipc_node_unlock(n_ptr);
			continue;
		}
		link_state_event(l_ptr, TRAFFIC_MSG_EVT);

		if (link_working_working(l_ptr)) {
			/* Re-insert in front of queue */
			buf->next = head;
			head = buf;
			tipc_node_unlock(n_ptr);
			continue;
		}
		tipc_node_unlock(n_ptr);
cont:
		kfree_skb(buf);
	}
	read_unlock_bh(&tipc_net_lock);
}

/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
			struct sk_buff *buf)
{
	struct sk_buff *queue_buf;
	struct sk_buff **prev;
	u32 seq_no = buf_seqno(buf);

	buf->next = NULL;

	/* Empty queue ? */
	if (*head == NULL) {
		*head = *tail = buf;
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(*tail), seq_no)) {
		(*tail)->next = buf;
		*tail = buf;
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	prev = head;
	queue_buf = *head;
	for (;;) {
		u32 curr_seqno = buf_seqno(queue_buf);

		if (seq_no == curr_seqno) {
			kfree_skb(buf);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;

		prev = &queue_buf->next;
		queue_buf = queue_buf->next;
	}

	buf->next = queue_buf;
	*prev = buf;
	return 1;
}
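/*
 * Deferred-queue example (illustrative): if the queue holds seqnos 5-6-9,
 * tipc_link_defer_pkt() appends 10 at the tail, inserts 7 between 6 and 9,
 * and silently frees a duplicate 6 without changing the queue.
 */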
/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
			      int probe_msg, u32 gap, u32 tolerance,
			      u32 priority, u32 ack_mtu)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;

	/* Discard any previous message that was deferred due to congestion */
	if (l_ptr->proto_msg_queue) {
		kfree_skb(l_ptr->proto_msg_queue);
		l_ptr->proto_msg_queue = NULL;
	}

	if (link_blocked(l_ptr))
		return;

	/* Abort non-RESET send if communication with node is prohibited */
	if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
		return;

	/* Create protocol message with "out-of-sequence" sequence number */
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent());

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

		if (!tipc_link_is_up(l_ptr))
			return;
		if (l_ptr->next_out)
			next_sent = buf_seqno(l_ptr->next_out);
		msg_set_next_sent(msg, next_sent);
		if (l_ptr->oldest_deferred_in) {
			u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
		if (probe_msg) {
			u32 mtu = l_ptr->max_pkt;

			if ((mtu < l_ptr->max_pkt_target) &&
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				}
				l_ptr->max_pkt_probes++;
			}

			l_ptr->stats.sent_probes++;
		}
		l_ptr->stats.sent_states++;
	} else {	/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);
	msg_set_size(msg, msg_size);

	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));

	/* Defer message if bearer is already congested */
	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
		l_ptr->proto_msg_queue = buf;
		return;
	}

	/* Defer message if attempting to send results in bearer congestion */
	if (!tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
		tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
		l_ptr->proto_msg_queue = buf;
		l_ptr->stats.bearer_congs++;
		return;
	}

	/* Message was sent successfully: discard the local copy */
	l_ptr->unacked_window = 0;
	kfree_skb(buf);
}
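/*
 * Probe-size example for the MTU discovery logic above (values are
 * illustrative only): with max_pkt == 1500 and max_pkt_target == 9000,
 * a probe is sized to
 *
 *	(1500 + (9000 - 1500) / 2 + 2) & ~3  ==  5252 octets,
 *
 * i.e. roughly halfway between the verified and the hoped-for MTU,
 * rounded to a 4-octet boundary. If ten successive probes at a given
 * size bring no acknowledged increase of max_pkt, max_pkt_target is
 * lowered to msg_size - 4 and the search continues in the lower half
 * of the interval.
 */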
/*
 * Receive protocol message.
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with the lowest address rules.
 */
static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	if (link_blocked(l_ptr))
		goto exit;

	/* record unnumbered packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
		if (tipc_own_addr > msg_prevnode(msg))
			l_ptr->b_ptr->net_plane = msg_net_plane(msg);

	l_ptr->owner->permit_changeover = msg_redundant_link(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
			if (less_eq(msg_session(msg), l_ptr->peer_session))
				break; /* duplicate or old reset: ignore */
		}

		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
						 link_working_unknown(l_ptr))) {
			/*
			 * peer has lost contact -- don't allow peer's links
			 * to reactivate before we recognize loss & clean up
			 */
			l_ptr->owner->block_setup = WAIT_NODE_DOWN;
		}

		link_state_event(l_ptr, RESET_MSG);

		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according to the other endpoint's values */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		max_pkt_info = msg_max_pkt(msg);
		if (max_pkt_info) {
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
			l_ptr->max_pkt = l_ptr->max_pkt_target;
		}
		l_ptr->owner->bclink.supportable = (max_pkt_info != 0);

		/* Synchronize broadcast link info, if not done previously */
		if (!tipc_node_is_up(l_ptr->owner)) {
			l_ptr->owner->bclink.last_sent =
				l_ptr->owner->bclink.last_in =
				msg_last_bcast(msg);
			l_ptr->owner->bclink.oos_state = 0;
		}

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		if (msg_type(msg) == ACTIVATE_MSG)
			link_state_event(l_ptr, ACTIVATE_MSG);
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			pr_warn("%s<%s>, priority change %u->%u\n",
				link_rst_msg, l_ptr->name, l_ptr->priority,
				msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}
		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
			rec_gap = mod(msg_next_sent(msg) -
				      mod(l_ptr->next_in_no));
		}

		max_pkt_ack = msg_max_pkt(msg);
		if (max_pkt_ack > l_ptr->max_pkt) {
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}

		max_pkt_ack = 0;
		if (msg_probe(msg)) {
			l_ptr->stats.recv_probes++;
			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
				max_pkt_ack = msg_size(msg);
		}

		/* Protocol message before retransmits, reduce loss risk */
		if (l_ptr->owner->bclink.supported)
			tipc_bclink_update_link_state(l_ptr->owner,
						      msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
						 0, rec_gap, 0, 0, max_pkt_ack);
		}
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, l_ptr->first_out,
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	kfree_skb(buf);
}
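/*
 * Net plane tie-break example for the check above (addresses are
 * hypothetical): if this node is 1.1.2 and receives a protocol message
 * from peer 1.1.1 carrying a different net_plane, then
 * tipc_own_addr > msg_prevnode(msg) holds and this node adopts the
 * peer's plane; the peer, having the lower address, keeps its own.
 */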
/*
 * tipc_link_tunnel(): Send one message via a link belonging to
 * another bearer. Owner node is locked.
 */
static void tipc_link_tunnel(struct tipc_link *l_ptr,
			     struct tipc_msg *tunnel_hdr,
			     struct tipc_msg *msg,
			     u32 selector)
{
	struct tipc_link *tunnel;
	struct sk_buff *buf;
	u32 length = msg_size(msg);

	tunnel = l_ptr->owner->active_links[selector & 1];
	if (!tipc_link_is_up(tunnel)) {
		pr_warn("%stunnel link no longer available\n", link_co_err);
		return;
	}
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	buf = tipc_buf_acquire(length + INT_H_SIZE);
	if (!buf) {
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
		return;
	}
	skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
	tipc_link_send_buf(tunnel, buf);
}

/*
 * changeover(): Send whole message queue via the remaining link.
 * Owner node is locked.
 */
void tipc_link_changeover(struct tipc_link *l_ptr)
{
	u32 msgcount = l_ptr->out_queue_size;
	struct sk_buff *crs = l_ptr->first_out;
	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
	struct tipc_msg tunnel_hdr;
	int split_bundles;

	if (!tunnel)
		return;

	if (!l_ptr->owner->permit_changeover) {
		pr_warn("%speer did not permit changeover\n", link_co_err);
		return;
	}

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		      ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	if (!l_ptr->first_out) {
		struct sk_buff *buf;

		buf = tipc_buf_acquire(INT_H_SIZE);
		if (buf) {
			skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			tipc_link_send_buf(tunnel, buf);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

	while (crs) {
		struct tipc_msg *msg = buf_msg(crs);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
						 msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
					 msg_link_selector(msg));
		}
		crs = crs->next;
	}
}
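/*
 * Bundle-splitting rationale for tipc_link_changeover() above: when the
 * two active links differ, a bundle queued on the failed link may carry
 * messages whose selectors map to either remaining link, so the bundle
 * is unpacked and each inner message is tunneled through the link that
 * its own selector picks, as in the selector mapping used by
 * tipc_link_tunnel():
 *
 *	tunnel = l_ptr->owner->active_links[msg_link_selector(m) & 1];
 *
 * When both active_links[] entries point at the same link, the bundle
 * can be tunneled in one piece.
 */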
void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
{
	struct sk_buff *iter;
	struct tipc_msg tunnel_hdr;

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		      DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	iter = l_ptr->first_out;
	while (iter) {
		struct sk_buff *outbuf;
		struct tipc_msg *msg = buf_msg(iter);
		u32 length = msg_size(msg);

		if (msg_user(msg) == MSG_BUNDLER)
			msg_set_type(msg, CLOSED_MSG);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Update */
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
		outbuf = tipc_buf_acquire(length + INT_H_SIZE);
		if (outbuf == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
					       length);
		tipc_link_send_buf(tunnel, outbuf);
		if (!tipc_link_is_up(l_ptr))
			return;
		iter = iter->next;
	}
}

/**
 * buf_extract - extracts embedded TIPC message from another message
 * @skb: encapsulating message buffer
 * @from_pos: offset to extract from
 *
 * Returns a new message buffer containing an embedded message. The
 * encapsulating message itself is left unchanged.
 */
static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
{
	struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
	u32 size = msg_size(msg);
	struct sk_buff *eb;

	eb = tipc_buf_acquire(size);
	if (eb)
		skb_copy_to_linear_data(eb, msg, size);
	return eb;
}

/*
 * link_recv_changeover_msg(): Receive tunneled packet sent
 * via other link. Node is locked. Return extracted buffer.
 */
static int link_recv_changeover_msg(struct tipc_link **l_ptr,
				    struct sk_buff **buf)
{
	struct sk_buff *tunnel_buf = *buf;
	struct tipc_link *dest_link;
	struct tipc_msg *msg;
	struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
	u32 msg_typ = msg_type(tunnel_msg);
	u32 msg_count = msg_msgcnt(tunnel_msg);

	dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
	if (!dest_link)
		goto exit;
	if (dest_link == *l_ptr) {
		pr_err("Unexpected changeover message on link <%s>\n",
		       (*l_ptr)->name);
		goto exit;
	}
	*l_ptr = dest_link;
	msg = msg_get_wrapped(tunnel_msg);

	if (msg_typ == DUPLICATE_MSG) {
		if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
			goto exit;
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf == NULL) {
			pr_warn("%sduplicate msg dropped\n", link_co_err);
			goto exit;
		}
		kfree_skb(tunnel_buf);
		return 1;
	}

	/* First original message? */
	if (tipc_link_is_up(dest_link)) {
		pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg,
			dest_link->name);
		tipc_link_reset(dest_link);
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	} else if (dest_link->exp_msg_count == START_CHANGEOVER) {
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	}

	/* Receive original message */
	if (dest_link->exp_msg_count == 0) {
		pr_warn("%sgot too many tunnelled messages\n", link_co_err);
		goto exit;
	}
	dest_link->exp_msg_count--;
	if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
		goto exit;
	} else {
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf != NULL) {
			kfree_skb(tunnel_buf);
			return 1;
		} else {
			pr_warn("%soriginal msg dropped\n", link_co_err);
		}
	}
exit:
	*buf = NULL;
	kfree_skb(tunnel_buf);
	return 0;
}
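/*
 * Changeover accounting example for link_recv_changeover_msg() above
 * (message counts are illustrative): if the first ORIGINAL_MSG arrives
 * with msgcnt 3, the destination link is reset and exp_msg_count becomes
 * 3; each accepted original then decrements it (3 -> 2 -> 1 -> 0), and
 * any further tunneled original is rejected with "got too many tunnelled
 * messages". A link already waiting in START_CHANGEOVER takes the same
 * count from the first original it sees.
 */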
/*
 * Bundler functionality:
 */
void tipc_link_recv_bundle(struct sk_buff *buf)
{
	u32 msgcount = msg_msgcnt(buf_msg(buf));
	u32 pos = INT_H_SIZE;
	struct sk_buff *obuf;

	while (msgcount--) {
		obuf = buf_extract(buf, pos);
		if (obuf == NULL) {
			pr_warn("Link unable to unbundle message(s)\n");
			break;
		}
		pos += align(msg_size(buf_msg(obuf)));
		tipc_net_route_msg(obuf);
	}
	kfree_skb(buf);
}

/*
 * Fragmentation/defragmentation:
 */

/*
 * link_send_long_buf: Entry for buffers needing fragmentation.
 * The buffer is complete, inclusive of total message length.
 * Returns user data length.
 */
static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	struct sk_buff *buf_chain = NULL;
	struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
	struct tipc_msg *inmsg = buf_msg(buf);
	struct tipc_msg fragm_hdr;
	u32 insize = msg_size(inmsg);
	u32 dsz = msg_data_sz(inmsg);
	unchar *crs = buf->data;
	u32 rest = insize;
	u32 pack_sz = l_ptr->max_pkt;
	u32 fragm_sz = pack_sz - INT_H_SIZE;
	u32 fragm_no = 0;
	u32 destaddr;

	if (msg_short(inmsg))
		destaddr = l_ptr->addr;
	else
		destaddr = msg_destnode(inmsg);

	/* Prepare reusable fragment header: */
	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, destaddr);

	/* Chop up message: */
	while (rest > 0) {
		struct sk_buff *fragm;

		if (rest <= fragm_sz) {
			fragm_sz = rest;
			msg_set_type(&fragm_hdr, LAST_FRAGMENT);
		}
		fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
		if (fragm == NULL) {
			kfree_skb(buf);
			while (buf_chain) {
				buf = buf_chain;
				buf_chain = buf_chain->next;
				kfree_skb(buf);
			}
			return -ENOMEM;
		}
		msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
		fragm_no++;
		msg_set_fragm_no(&fragm_hdr, fragm_no);
		skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
					       fragm_sz);
		buf_chain_tail->next = fragm;
		buf_chain_tail = fragm;

		rest -= fragm_sz;
		crs += fragm_sz;
		msg_set_type(&fragm_hdr, FRAGMENT);
	}
	kfree_skb(buf);

	/* Append chain of fragments to send queue & send them */
	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);

	return dsz;
}
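/*
 * Fragmentation example for link_send_long_buf() above, assuming the
 * internal header size INT_H_SIZE is 40 octets: with max_pkt == 1500
 * the payload per fragment is 1500 - 40 = 1460 octets, so a 3000-octet
 * message (header included) is chopped into
 *
 *	FIRST_FRAGMENT:	1460 octets
 *	FRAGMENT:	1460 octets
 *	LAST_FRAGMENT:	  80 octets
 *
 * each carried behind its own fragment header and all sharing one
 * long_msg_seq_no.
 */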
/*
 * A pending message being re-assembled must store certain values
 * to handle subsequent fragments correctly. The following functions
 * help storing these values in unused, available fields in the
 * pending message. This makes dynamic memory allocation unnecessary.
 */
static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
{
	msg_set_seqno(buf_msg(buf), seqno);
}

static u32 get_fragm_size(struct sk_buff *buf)
{
	return msg_ack(buf_msg(buf));
}

static void set_fragm_size(struct sk_buff *buf, u32 sz)
{
	msg_set_ack(buf_msg(buf), sz);
}

static u32 get_expected_frags(struct sk_buff *buf)
{
	return msg_bcast_ack(buf_msg(buf));
}

static void set_expected_frags(struct sk_buff *buf, u32 exp)
{
	msg_set_bcast_ack(buf_msg(buf), exp);
}

static u32 get_timer_cnt(struct sk_buff *buf)
{
	return msg_reroute_cnt(buf_msg(buf));
}

static void incr_timer_cnt(struct sk_buff *buf)
{
	msg_incr_reroute_cnt(buf_msg(buf));
}

/*
 * tipc_link_recv_fragment(): Called with node lock on. Returns
 * the reassembled buffer if message is complete.
 */
int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
			    struct tipc_msg **m)
{
	struct sk_buff *prev = NULL;
	struct sk_buff *fbuf = *fb;
	struct tipc_msg *fragm = buf_msg(fbuf);
	struct sk_buff *pbuf = *pending;
	u32 long_msg_seq_no = msg_long_msgno(fragm);

	*fb = NULL;

	/* Is there an incomplete message waiting for this fragment? */
	while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) ||
			(msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
		prev = pbuf;
		pbuf = pbuf->next;
	}

	if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
		struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
		u32 msg_sz = msg_size(imsg);
		u32 fragm_sz = msg_data_sz(fragm);
		u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
		u32 max = TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE;

		if (msg_type(imsg) == TIPC_MCAST_MSG)
			max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
		if (msg_size(imsg) > max) {
			kfree_skb(fbuf);
			return 0;
		}
		pbuf = tipc_buf_acquire(msg_size(imsg));
		if (pbuf != NULL) {
			pbuf->next = *pending;
			*pending = pbuf;
			skb_copy_to_linear_data(pbuf, imsg,
						msg_data_sz(fragm));
			/* Prepare buffer for subsequent fragments. */
			set_long_msg_seqno(pbuf, long_msg_seq_no);
			set_fragm_size(pbuf, fragm_sz);
			set_expected_frags(pbuf, exp_fragm_cnt - 1);
		} else {
			pr_debug("Link unable to reassemble fragmented message\n");
			kfree_skb(fbuf);
			return -1;
		}
		kfree_skb(fbuf);
		return 0;
	} else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
		u32 dsz = msg_data_sz(fragm);
		u32 fsz = get_fragm_size(pbuf);
		u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
		u32 exp_frags = get_expected_frags(pbuf) - 1;

		skb_copy_to_linear_data_offset(pbuf, crs,
					       msg_data(fragm), dsz);
		kfree_skb(fbuf);

		/* Is message complete? */
		if (exp_frags == 0) {
			if (prev)
				prev->next = pbuf->next;
			else
				*pending = pbuf->next;
			msg_reset_reroute_cnt(buf_msg(pbuf));
			*fb = pbuf;
			*m = buf_msg(pbuf);
			return 1;
		}
		set_expected_frags(pbuf, exp_frags);
		return 0;
	}
	kfree_skb(fbuf);
	return 0;
}
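/*
 * Reassembly bookkeeping example for tipc_link_recv_fragment() above,
 * continuing the illustrative 3000-octet / 1460-octet case: the first
 * fragment yields
 *
 *	exp_fragm_cnt = 3000 / 1460 + !!(3000 % 1460) = 2 + 1 = 3,
 *
 * so expected_frags is seeded with 2 and counted down by each later
 * fragment; the message is delivered when it reaches zero. The counters
 * live in the pending buffer's own header fields (seqno, ack,
 * bcast_ack), as set up by the helper functions above.
 */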
/**
 * link_check_defragm_bufs - flush stale incoming message fragments
 * @l_ptr: pointer to link
 */
static void link_check_defragm_bufs(struct tipc_link *l_ptr)
{
	struct sk_buff *prev = NULL;
	struct sk_buff *next = NULL;
	struct sk_buff *buf = l_ptr->defragm_buf;

	if (!buf)
		return;
	if (!link_working_working(l_ptr))
		return;
	while (buf) {
		u32 cnt = get_timer_cnt(buf);

		next = buf->next;
		if (cnt < 4) {
			incr_timer_cnt(buf);
			prev = buf;
		} else {
			if (prev)
				prev->next = buf->next;
			else
				l_ptr->defragm_buf = buf->next;
			kfree_skb(buf);
		}
		buf = next;
	}
}

static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
{
	if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
		return;

	l_ptr->tolerance = tolerance;
	l_ptr->continuity_interval =
		((tolerance / 4) > 500) ? 500 : tolerance / 4;
	l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
}

void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
{
	/* Data messages from this node, including FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
	/* Transiting data messages, including FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
	l_ptr->queue_limit[CONN_MANAGER] = 1200;
	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
	/* FRAGMENT and LAST_FRAGMENT packets */
	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
}
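/*
 * Worked example for the two setters above (parameter values are
 * hypothetical): tipc_link_set_queue_limits(l_ptr, 50) gives
 * per-importance send queue limits of 50, 64, 80 and 96 packets, since
 * (50 / 3) * 4, * 5 and * 6 use integer division. Likewise,
 * link_set_supervision_props() with a tolerance of 1500 ms yields
 * continuity_interval = 375 ms and abort_limit = 1500 / (375 / 4) = 16
 * unanswered probes before the link is declared failed.
 */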
/**
 * link_find_link - locate link by name
 * @name: ptr to link name string
 * @node: ptr to area to be filled with ptr to associated node
 *
 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
 * this also prevents link deletion.
 *
 * Returns pointer to link (or NULL if invalid link name).
 */
static struct tipc_link *link_find_link(const char *name,
					struct tipc_node **node)
{
	struct tipc_link_name link_name_parts;
	struct tipc_bearer *b_ptr;
	struct tipc_link *l_ptr;

	if (!link_name_validate(name, &link_name_parts))
		return NULL;

	b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
	if (!b_ptr)
		return NULL;

	*node = tipc_node_find(link_name_parts.addr_peer);
	if (!*node)
		return NULL;

	l_ptr = (*node)->links[b_ptr->identity];
	if (!l_ptr || strcmp(l_ptr->name, name))
		return NULL;

	return l_ptr;
}
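/*
 * Name format example for link_find_link() above (addresses and
 * interface names are hypothetical): a link name of the form
 *
 *	"1.1.1:eth0-1.1.2:eth0"
 *
 * is split by link_name_validate() into addr_local 1.1.1, if_local
 * "eth0", addr_peer 1.1.2 and if_peer "eth0"; the local interface then
 * identifies the bearer and the peer address identifies the node.
 */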
2722 */ 2723 tipc_node_lock(node); 2724 switch (cmd) { 2725 case TIPC_CMD_SET_LINK_TOL: 2726 link_set_supervision_props(l_ptr, new_value); 2727 tipc_link_send_proto_msg(l_ptr, 2728 STATE_MSG, 0, 0, new_value, 0, 0); 2729 break; 2730 case TIPC_CMD_SET_LINK_PRI: 2731 l_ptr->priority = new_value; 2732 tipc_link_send_proto_msg(l_ptr, 2733 STATE_MSG, 0, 0, 0, new_value, 0); 2734 break; 2735 case TIPC_CMD_SET_LINK_WINDOW: 2736 tipc_link_set_queue_limits(l_ptr, new_value); 2737 break; 2738 } 2739 tipc_node_unlock(node); 2740 return 0; 2741 } 2742 2743 b_ptr = tipc_bearer_find(name); 2744 if (b_ptr) { 2745 switch (cmd) { 2746 case TIPC_CMD_SET_LINK_TOL: 2747 b_ptr->tolerance = new_value; 2748 return 0; 2749 case TIPC_CMD_SET_LINK_PRI: 2750 b_ptr->priority = new_value; 2751 return 0; 2752 case TIPC_CMD_SET_LINK_WINDOW: 2753 b_ptr->window = new_value; 2754 return 0; 2755 } 2756 return -EINVAL; 2757 } 2758 2759 m_ptr = tipc_media_find(name); 2760 if (!m_ptr) 2761 return -ENODEV; 2762 switch (cmd) { 2763 case TIPC_CMD_SET_LINK_TOL: 2764 m_ptr->tolerance = new_value; 2765 return 0; 2766 case TIPC_CMD_SET_LINK_PRI: 2767 m_ptr->priority = new_value; 2768 return 0; 2769 case TIPC_CMD_SET_LINK_WINDOW: 2770 m_ptr->window = new_value; 2771 return 0; 2772 } 2773 return -EINVAL; 2774 } 2775 2776 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, 2777 u16 cmd) 2778 { 2779 struct tipc_link_config *args; 2780 u32 new_value; 2781 int res; 2782 2783 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG)) 2784 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2785 2786 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area); 2787 new_value = ntohl(args->value); 2788 2789 if (!link_value_is_valid(cmd, new_value)) 2790 return tipc_cfg_reply_error_string( 2791 "cannot change, value invalid"); 2792 2793 if (!strcmp(args->name, tipc_bclink_name)) { 2794 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) && 2795 (tipc_bclink_set_queue_limits(new_value) == 0)) 2796 return tipc_cfg_reply_none(); 2797 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 2798 " (cannot change setting on broadcast link)"); 2799 } 2800 2801 read_lock_bh(&tipc_net_lock); 2802 res = link_cmd_set_value(args->name, new_value, cmd); 2803 read_unlock_bh(&tipc_net_lock); 2804 if (res) 2805 return tipc_cfg_reply_error_string("cannot change link setting"); 2806 2807 return tipc_cfg_reply_none(); 2808 } 2809 2810 /** 2811 * link_reset_statistics - reset link statistics 2812 * @l_ptr: pointer to link 2813 */ 2814 static void link_reset_statistics(struct tipc_link *l_ptr) 2815 { 2816 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats)); 2817 l_ptr->stats.sent_info = l_ptr->next_out_no; 2818 l_ptr->stats.recv_info = l_ptr->next_in_no; 2819 } 2820 2821 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space) 2822 { 2823 char *link_name; 2824 struct tipc_link *l_ptr; 2825 struct tipc_node *node; 2826 2827 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2828 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2829 2830 link_name = (char *)TLV_DATA(req_tlv_area); 2831 if (!strcmp(link_name, tipc_bclink_name)) { 2832 if (tipc_bclink_reset_stats()) 2833 return tipc_cfg_reply_error_string("link not found"); 2834 return tipc_cfg_reply_none(); 2835 } 2836 2837 read_lock_bh(&tipc_net_lock); 2838 l_ptr = link_find_link(link_name, &node); 2839 if (!l_ptr) { 2840 read_unlock_bh(&tipc_net_lock); 2841 return tipc_cfg_reply_error_string("link not found"); 2842 } 2843 2844 
/**
 * percent - convert count to a percentage of total (rounding to nearest)
 */
static u32 percent(u32 count, u32 total)
{
	return (count * 100 + (total / 2)) / total;
}

/**
 * tipc_link_stats - print link statistics
 * @name: link name
 * @buf: print buffer area
 * @buf_size: size of print buffer area
 *
 * Returns length of print buffer data string (or 0 if error)
 */
static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
{
	struct tipc_link *l;
	struct tipc_stats *s;
	struct tipc_node *node;
	char *status;
	u32 profile_total = 0;
	int ret;

	if (!strcmp(name, tipc_bclink_name))
		return tipc_bclink_stats(buf, buf_size);

	read_lock_bh(&tipc_net_lock);
	l = link_find_link(name, &node);
	if (!l) {
		read_unlock_bh(&tipc_net_lock);
		return 0;
	}
	tipc_node_lock(node);
	s = &l->stats;

	if (tipc_link_is_active(l))
		status = "ACTIVE";
	else if (tipc_link_is_up(l))
		status = "STANDBY";
	else
		status = "DEFUNCT";

	ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
			    " %s MTU:%u Priority:%u Tolerance:%u ms"
			    " Window:%u packets\n",
			    l->name, status, l->max_pkt, l->priority,
			    l->tolerance, l->queue_limit[0]);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_in_no - s->recv_info, s->recv_fragments,
			     s->recv_fragmented, s->recv_bundles,
			     s->recv_bundled);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_out_no - s->sent_info, s->sent_fragments,
			     s->sent_fragmented, s->sent_bundles,
			     s->sent_bundled);

	profile_total = s->msg_length_counts;
	if (!profile_total)
		profile_total = 1;

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " TX profile sample:%u packets average:%u octets\n"
			     " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
			     "-16384:%u%% -32768:%u%% -66000:%u%%\n",
			     s->msg_length_counts,
			     s->msg_lengths_total / profile_total,
			     percent(s->msg_length_profile[0], profile_total),
			     percent(s->msg_length_profile[1], profile_total),
			     percent(s->msg_length_profile[2], profile_total),
			     percent(s->msg_length_profile[3], profile_total),
			     percent(s->msg_length_profile[4], profile_total),
			     percent(s->msg_length_profile[5], profile_total),
			     percent(s->msg_length_profile[6], profile_total));

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " RX states:%u probes:%u naks:%u defs:%u"
			     " dups:%u\n", s->recv_states, s->recv_probes,
			     s->recv_nacks, s->deferred_recv, s->duplicates);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " TX states:%u probes:%u naks:%u acks:%u"
			     " dups:%u\n", s->sent_states, s->sent_probes,
			     s->sent_nacks, s->sent_acks, s->retransmitted);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " Congestion bearer:%u link:%u Send queue"
			     " max:%u avg:%u\n", s->bearer_congs, s->link_congs,
			     s->max_queue_sz, s->queue_sz_counts ?
			     (s->accu_queue_sz / s->queue_sz_counts) : 0);

	tipc_node_unlock(node);
	read_unlock_bh(&tipc_net_lock);
	return ret;
}
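/*
 * Rounding check for percent() above: percent(1, 3) == (100 + 1) / 3
 * == 33 and percent(2, 3) == (200 + 1) / 3 == 67, so the message-length
 * profile percentages printed by tipc_link_stats() are rounded to the
 * nearest whole percent rather than truncated.
 */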
struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
{
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	int str_len;
	int pb_len;
	char *pb;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
	if (!buf)
		return NULL;

	rep_tlv = (struct tlv_desc *)buf->data;
	pb = TLV_DATA(rep_tlv);
	pb_len = ULTRA_STRING_MAX_LEN;
	str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
				  pb, pb_len);
	if (!str_len) {
		kfree_skb(buf);
		return tipc_cfg_reply_error_string("link not found");
	}
	str_len += 1;	/* for "\0" */
	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}

/**
 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
 * @dest: network address of destination node
 * @selector: used to select from set of active links
 *
 * If no active link can be found, uses default maximum packet size.
 */
u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
{
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	u32 res = MAX_PKT_DEFAULT;

	if (dest == tipc_own_addr)
		return MAX_MSG_SIZE;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector & 1];
		if (l_ptr)
			res = l_ptr->max_pkt;
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);
	return res;
}

static void link_print(struct tipc_link *l_ptr, const char *str)
{
	pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name);

	if (link_working_unknown(l_ptr))
		pr_cont(":WU\n");
	else if (link_reset_reset(l_ptr))
		pr_cont(":RR\n");
	else if (link_reset_unknown(l_ptr))
		pr_cont(":RU\n");
	else if (link_working_working(l_ptr))
		pr_cont(":WW\n");
	else
		pr_cont("\n");
}
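/*
 * Output sketch for link_print() above (link identity and bearer name
 * are hypothetical): a caller passing str such as "Resetting link "
 * would log something like
 *
 *	Resetting link  Link 1001002<eth:eth0>::WW
 *
 * where the two-letter suffix encodes the FSM state: WW (working/
 * working), WU (working/unknown), RU (reset/unknown) or RR
 * (reset/reset).
 */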