/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, Ericsson AB
 * Copyright (c) 2005-2006, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "config.h"
#include "node.h"
#include "cluster.h"
#include "net.h"
#include "addr.h"
#include "node_subscr.h"
#include "link.h"
#include "port.h"
#include "bearer.h"
#include "name_distr.h"

void node_print(struct print_buf *buf, struct tipc_node *n_ptr, char *str);
static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);

/* sorted list of nodes within cluster */
static struct tipc_node *tipc_nodes = NULL;

static DEFINE_SPINLOCK(node_create_lock);

u32 tipc_own_tag = 0;
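
/*
 * Illustrative scenario for node_create_lock (the address is made up):
 * two bearers may both receive a discovery message from the same peer,
 * say 1.1.5, while net_lock is held only for reading.  Without this
 * lock, both callers could pass the "node not found" scan in
 * tipc_node_create() below and allocate duplicate tipc_node entries
 * for a single address; see the comment on that routine.
 */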

/**
 * tipc_node_create - create neighboring node
 *
 * Currently, this routine is called by neighbor discovery code, which holds
 * net_lock for reading only.  We must take node_create_lock to ensure a node
 * isn't created twice if two different bearers discover the node at the same
 * time.  (It would be preferable to switch to holding net_lock in write mode,
 * but this is a non-trivial change.)
 */

struct tipc_node *tipc_node_create(u32 addr)
{
	struct cluster *c_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node **curr_node;

	spin_lock_bh(&node_create_lock);

	for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
		if (addr < n_ptr->addr)
			break;
		if (addr == n_ptr->addr) {
			spin_unlock_bh(&node_create_lock);
			return n_ptr;
		}
	}

	n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
	if (!n_ptr) {
		spin_unlock_bh(&node_create_lock);
		warn("Node creation failed, no memory\n");
		return NULL;
	}

	c_ptr = tipc_cltr_find(addr);
	if (!c_ptr)
		c_ptr = tipc_cltr_create(addr);
	if (!c_ptr) {
		spin_unlock_bh(&node_create_lock);
		kfree(n_ptr);
		return NULL;
	}

	n_ptr->addr = addr;
	spin_lock_init(&n_ptr->lock);
	INIT_LIST_HEAD(&n_ptr->nsub);
	n_ptr->owner = c_ptr;
	tipc_cltr_attach_node(c_ptr, n_ptr);
	n_ptr->last_router = -1;

	/* Insert node into ordered list */
	for (curr_node = &tipc_nodes; *curr_node;
	     curr_node = &(*curr_node)->next) {
		if (addr < (*curr_node)->addr) {
			n_ptr->next = *curr_node;
			break;
		}
	}
	(*curr_node) = n_ptr;
	spin_unlock_bh(&node_create_lock);
	return n_ptr;
}

void tipc_node_delete(struct tipc_node *n_ptr)
{
	if (!n_ptr)
		return;

	dbg("node %x deleted\n", n_ptr->addr);
	kfree(n_ptr);
}


/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */

void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr)
{
	struct link **active = &n_ptr->active_links[0];

	n_ptr->working_links++;

	info("Established link <%s> on network plane %c\n",
	     l_ptr->name, l_ptr->b_ptr->net_plane);

	if (!active[0]) {
		dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]);
		active[0] = active[1] = l_ptr;
		node_established_contact(n_ptr);
		return;
	}
	if (l_ptr->priority < active[0]->priority) {
		info("New link <%s> becomes standby\n", l_ptr->name);
		return;
	}
	tipc_link_send_duplicate(active[0], l_ptr);
	if (l_ptr->priority == active[0]->priority) {
		active[0] = l_ptr;
		return;
	}
	info("Old link <%s> becomes standby\n", active[0]->name);
	if (active[1] != active[0])
		info("Old link <%s> becomes standby\n", active[1]->name);
	active[0] = active[1] = l_ptr;
}
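
/*
 * Note on the active_links[] pair managed above and below (the pairing
 * scheme is an assumption drawn from how the slots are filled): keeping
 * two slots lets senders share load across two links of equal priority,
 * typically by indexing with the low bit of a message selector, e.g.
 * active[selector & 1], while a single highest-priority link simply
 * occupies both slots.
 */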

/**
 * node_select_active_links - select active links for node
 */

static void node_select_active_links(struct tipc_node *n_ptr)
{
	struct link **active = &n_ptr->active_links[0];
	u32 i;
	u32 highest_prio = 0;

	active[0] = active[1] = NULL;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct link *l_ptr = n_ptr->links[i];

		if (!l_ptr || !tipc_link_is_up(l_ptr) ||
		    (l_ptr->priority < highest_prio))
			continue;

		if (l_ptr->priority > highest_prio) {
			highest_prio = l_ptr->priority;
			active[0] = active[1] = l_ptr;
		} else {
			active[1] = l_ptr;
		}
	}
}

/**
 * tipc_node_link_down - handle loss of link
 */

void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr)
{
	struct link **active;

	n_ptr->working_links--;

	if (!tipc_link_is_active(l_ptr)) {
		info("Lost standby link <%s> on network plane %c\n",
		     l_ptr->name, l_ptr->b_ptr->net_plane);
		return;
	}
	info("Lost link <%s> on network plane %c\n",
	     l_ptr->name, l_ptr->b_ptr->net_plane);

	active = &n_ptr->active_links[0];
	if (active[0] == l_ptr)
		active[0] = active[1];
	if (active[1] == l_ptr)
		active[1] = active[0];
	if (active[0] == l_ptr)
		node_select_active_links(n_ptr);
	if (tipc_node_is_up(n_ptr))
		tipc_link_changeover(l_ptr);
	else
		node_lost_contact(n_ptr);
}

int tipc_node_has_active_links(struct tipc_node *n_ptr)
{
	return n_ptr->active_links[0] != NULL;
}

int tipc_node_has_redundant_links(struct tipc_node *n_ptr)
{
	return n_ptr->working_links > 1;
}

static int tipc_node_has_active_routes(struct tipc_node *n_ptr)
{
	return n_ptr && (n_ptr->last_router >= 0);
}

int tipc_node_is_up(struct tipc_node *n_ptr)
{
	return tipc_node_has_active_links(n_ptr) ||
	       tipc_node_has_active_routes(n_ptr);
}

struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
{
	struct tipc_node *n_ptr = tipc_node_find(l_ptr->addr);

	if (!n_ptr)
		n_ptr = tipc_node_create(l_ptr->addr);
	if (n_ptr) {
		u32 bearer_id = l_ptr->b_ptr->identity;
		char addr_string[16];

		if (n_ptr->link_cnt >= 2) {
			err("Attempt to create third link to %s\n",
			    tipc_addr_string_fill(addr_string, n_ptr->addr));
			return NULL;
		}

		if (!n_ptr->links[bearer_id]) {
			n_ptr->links[bearer_id] = l_ptr;
			tipc_net.zones[tipc_zone(l_ptr->addr)]->links++;
			n_ptr->link_cnt++;
			return n_ptr;
		}
		err("Attempt to establish second link on <%s> to %s\n",
		    l_ptr->b_ptr->publ.name,
		    tipc_addr_string_fill(addr_string, l_ptr->addr));
	}
	return NULL;
}

void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr)
{
	n_ptr->links[l_ptr->b_ptr->identity] = NULL;
	tipc_net.zones[tipc_zone(l_ptr->addr)]->links--;
	n_ptr->link_cnt--;
}

/*
 * Routing table management - five cases to handle:
 *
 * 1: A link towards a zone/cluster external node comes up.
 *    => Send a multicast message updating routing tables of all
 *       system nodes within own cluster that the new destination
 *       can be reached via this node.
 *       (node.establishedContact()=>cluster.multicastNewRoute())
 *
 * 2: A link towards a slave node comes up.
 *    => Send a multicast message updating routing tables of all
 *       system nodes within own cluster that the new destination
 *       can be reached via this node.
 *       (node.establishedContact()=>cluster.multicastNewRoute())
 *    => Send a message to the slave node about existence
 *       of all system nodes within cluster:
 *       (node.establishedContact()=>cluster.sendLocalRoutes())
 *
 * 3: A new cluster local system node becomes available.
 *    => Send message(s) to this particular node containing
 *       information about all cluster external and slave
 *       nodes which can be reached via this node.
 *       (node.establishedContact()==>network.sendExternalRoutes())
 *       (node.establishedContact()==>network.sendSlaveRoutes())
 *    => Send messages to all directly connected slave nodes
 *       containing information about the existence of the new node
 *       (node.establishedContact()=>cluster.multicastNewRoute())
 *
 * 4: The link towards a zone/cluster external node or slave
 *    node goes down.
 *    => Send a multicast message updating routing tables of all
 *       nodes within cluster that the destination can no
 *       longer be reached via this node.
 *       (node.lostAllLinks()=>cluster.bcastLostRoute())
 *
 * 5: A cluster local system node becomes unavailable.
 *    => Remove all references to this node from the local
 *       routing tables.  Note: This is a completely node
 *       local operation.
 *       (node.lostAllLinks()=>network.removeAsRouter())
 *    => Send messages to all directly connected slave nodes
 *       containing information about loss of the node
 *       (node.lostAllLinks()=>cluster.multicastLostRoute())
 *
 */
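
/*
 * Worked example for the cases above (addresses are illustrative):
 * with tipc_own_addr 1.1.1, a new link to 1.2.7 is cluster external,
 * so case 1 applies and the route is broadcast to system nodes
 * 1..tipc_max_nodes of the own cluster; a new link to 1.1.9 stays
 * within the cluster, so case 3 applies instead.
 */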

static void node_established_contact(struct tipc_node *n_ptr)
{
	struct cluster *c_ptr;

	dbg("node_established_contact:-> %x\n", n_ptr->addr);
	if (!tipc_node_has_active_routes(n_ptr) && in_own_cluster(n_ptr->addr)) {
		tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
	}

	/* Synchronize broadcast acks */
	n_ptr->bclink.acked = tipc_bclink_get_last_sent();

	if (is_slave(tipc_own_addr))
		return;
	if (!in_own_cluster(n_ptr->addr)) {
		/* Usage case 1 (see above) */
		c_ptr = tipc_cltr_find(tipc_own_addr);
		if (!c_ptr)
			c_ptr = tipc_cltr_create(tipc_own_addr);
		if (c_ptr)
			tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1,
						  tipc_max_nodes);
		return;
	}

	c_ptr = n_ptr->owner;
	if (is_slave(n_ptr->addr)) {
		/* Usage case 2 (see above) */
		tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes);
		tipc_cltr_send_local_routes(c_ptr, n_ptr->addr);
		return;
	}

	if (n_ptr->bclink.supported) {
		tipc_nmap_add(&tipc_cltr_bcast_nodes, n_ptr->addr);
		if (n_ptr->addr < tipc_own_addr)
			tipc_own_tag++;
	}

	/* Case 3 (see above) */
	tipc_net_send_external_routes(n_ptr->addr);
	tipc_cltr_send_slave_routes(c_ptr, n_ptr->addr);
	tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE,
				  tipc_highest_allowed_slave);
}

static void node_cleanup_finished(unsigned long node_addr)
{
	struct tipc_node *n_ptr;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(node_addr);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		n_ptr->cleanup_required = 0;
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);
}

static void node_lost_contact(struct tipc_node *n_ptr)
{
	struct cluster *c_ptr;
	struct tipc_node_subscr *ns, *tns;
	char addr_string[16];
	u32 i;

	/* Clean up broadcast reception remains */
	n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
	while (n_ptr->bclink.deferred_head) {
		struct sk_buff *buf = n_ptr->bclink.deferred_head;
		n_ptr->bclink.deferred_head = buf->next;
		buf_discard(buf);
	}
	if (n_ptr->bclink.defragm) {
		buf_discard(n_ptr->bclink.defragm);
		n_ptr->bclink.defragm = NULL;
	}
	if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
		tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
	}

	/* Update routing tables */
	if (is_slave(tipc_own_addr)) {
		tipc_net_remove_as_router(n_ptr->addr);
	} else {
		if (!in_own_cluster(n_ptr->addr)) {
			/* Case 4 (see above) */
			c_ptr = tipc_cltr_find(tipc_own_addr);
			tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
						   tipc_max_nodes);
		} else {
			/* Case 5 (see above) */
			c_ptr = tipc_cltr_find(n_ptr->addr);
			if (is_slave(n_ptr->addr)) {
				tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
							   tipc_max_nodes);
			} else {
				if (n_ptr->bclink.supported) {
					tipc_nmap_remove(&tipc_cltr_bcast_nodes,
							 n_ptr->addr);
					if (n_ptr->addr < tipc_own_addr)
						tipc_own_tag--;
				}
				tipc_net_remove_as_router(n_ptr->addr);
				tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr,
							   LOWEST_SLAVE,
							   tipc_highest_allowed_slave);
			}
		}
	}
	if (tipc_node_has_active_routes(n_ptr))
		return;

	info("Lost contact with %s\n",
	     tipc_addr_string_fill(addr_string, n_ptr->addr));

	/* Abort link changeover */
	for (i = 0; i < MAX_BEARERS; i++) {
		struct link *l_ptr = n_ptr->links[i];
		if (!l_ptr)
			continue;
		l_ptr->reset_checkpoint = l_ptr->next_in_no;
		l_ptr->exp_msg_count = 0;
		tipc_link_reset_fragments(l_ptr);
	}

	/* Notify subscribers */
	list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
		ns->node = NULL;
		list_del_init(&ns->nodesub_list);
		tipc_k_signal((Handler)ns->handle_node_down,
			      (unsigned long)ns->usr_handle);
	}

	/* Prevent re-contact with node until all cleanup is done */

	n_ptr->cleanup_required = 1;
	tipc_k_signal((Handler)node_cleanup_finished, n_ptr->addr);
}
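
/*
 * The cleanup_required handshake above: node_lost_contact() marks the
 * node and queues node_cleanup_finished() via tipc_k_signal(), which
 * presumably runs from TIPC's deferred signal-handler context; only
 * once that handler has cleared the flag may the peer be re-contacted.
 */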

/**
 * tipc_node_select_next_hop - find the next-hop node for a message
 *
 * Called when cluster-local lookup has failed.
 */

struct tipc_node *tipc_node_select_next_hop(u32 addr, u32 selector)
{
	struct tipc_node *n_ptr;
	u32 router_addr;

	if (!tipc_addr_domain_valid(addr))
		return NULL;

	/* Look for direct link to destination processor */
	n_ptr = tipc_node_find(addr);
	if (n_ptr && tipc_node_has_active_links(n_ptr))
		return n_ptr;

	/* Cluster local system nodes *must* have direct links */
	if (!is_slave(addr) && in_own_cluster(addr))
		return NULL;

	/* Look for cluster local router with direct link to node */
	router_addr = tipc_node_select_router(n_ptr, selector);
	if (router_addr)
		return tipc_node_select(router_addr, selector);

	/* Slave nodes can only be accessed within own cluster via a
	   known router with direct link -- if no router was found, give up */
	if (is_slave(addr))
		return NULL;

	/* Inter zone/cluster -- find any direct link to remote cluster */
	addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
	n_ptr = tipc_net_select_remote_node(addr, selector);
	if (n_ptr && tipc_node_has_active_links(n_ptr))
		return n_ptr;

	/* Last resort -- look for any router to anywhere in remote zone */
	router_addr = tipc_net_select_router(addr, selector);
	if (router_addr)
		return tipc_node_select(router_addr, selector);

	return NULL;
}
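
/*
 * Worked example for the bitmap lookup below (figures are illustrative):
 * routers[] holds one bit per cluster-local node number, 32 bits per
 * word, and last_router indexes the highest non-empty word.  With
 * last_router == 0 the search limit is ulim = ((0 + 1) * 32) - 1 = 31;
 * mask then shrinks from tipc_max_nodes down to 31, so start = ref & mask
 * spreads different "ref" values fairly over the candidate bit positions.
 */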

/**
 * tipc_node_select_router - select router to reach specified node
 *
 * Uses a deterministic and fair algorithm for selecting router node.
 */

u32 tipc_node_select_router(struct tipc_node *n_ptr, u32 ref)
{
	u32 ulim;
	u32 mask;
	u32 start;
	u32 r;

	if (!n_ptr)
		return 0;

	if (n_ptr->last_router < 0)
		return 0;
	ulim = ((n_ptr->last_router + 1) * 32) - 1;

	/* Start entry must be random */
	mask = tipc_max_nodes;
	while (mask > ulim)
		mask >>= 1;
	start = ref & mask;
	r = start;

	/* Lookup upwards with wrap-around */
	do {
		if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
			break;
	} while (++r <= ulim);
	if (r > ulim) {
		r = 1;
		do {
			if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
				break;
		} while (++r < start);
		assert(r != start);
	}
	assert(r && (r <= ulim));
	return tipc_addr(own_zone(), own_cluster(), r);
}

void tipc_node_add_router(struct tipc_node *n_ptr, u32 router)
{
	u32 r_num = tipc_node(router);

	n_ptr->routers[r_num / 32] =
		((1 << (r_num % 32)) | n_ptr->routers[r_num / 32]);
	n_ptr->last_router = tipc_max_nodes / 32;
	while ((--n_ptr->last_router >= 0) &&
	       !n_ptr->routers[n_ptr->last_router]);
}

void tipc_node_remove_router(struct tipc_node *n_ptr, u32 router)
{
	u32 r_num = tipc_node(router);

	if (n_ptr->last_router < 0)
		return;		/* No routes */

	n_ptr->routers[r_num / 32] =
		((~(1 << (r_num % 32))) & (n_ptr->routers[r_num / 32]));
	n_ptr->last_router = tipc_max_nodes / 32;
	while ((--n_ptr->last_router >= 0) &&
	       !n_ptr->routers[n_ptr->last_router]);

	if (!tipc_node_is_up(n_ptr))
		node_lost_contact(n_ptr);
}

struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct tipc_node *n_ptr;
	struct tipc_node_info node_info;
	u32 payload_size;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
	if (!tipc_addr_domain_valid(domain))
		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
						   " (network address)");

	read_lock_bh(&tipc_net_lock);
	if (!tipc_nodes) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_none();
	}

	/* For now, get space for all other nodes
	   (will need to modify this when slave nodes are supported) */

	payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1);
	if (payload_size > 32768u) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (too many nodes)");
	}
	buf = tipc_cfg_reply_alloc(payload_size);
	if (!buf) {
		read_unlock_bh(&tipc_net_lock);
		return NULL;
	}

	/* Add TLVs for all nodes in scope */

	for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
		if (!tipc_in_scope(domain, n_ptr->addr))
			continue;
		node_info.addr = htonl(n_ptr->addr);
		node_info.up = htonl(tipc_node_is_up(n_ptr));
		tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
				    &node_info, sizeof(node_info));
	}

	read_unlock_bh(&tipc_net_lock);
	return buf;
}
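
/*
 * Sizing arithmetic for the reply buffers above and below (illustrative
 * figures): each entry occupies TLV_SPACE(payload), i.e. the payload
 * plus a 4-byte TLV header, rounded up to 4-byte alignment.  Assuming
 * tipc_node_info is two __be32 fields (8 bytes), one entry takes
 * 12 bytes, so the 32768u cap admits roughly 2700 nodes per reply.
 */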

struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct tipc_node *n_ptr;
	struct tipc_link_info link_info;
	u32 payload_size;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
	if (!tipc_addr_domain_valid(domain))
		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
						   " (network address)");

	if (tipc_mode != TIPC_NET_MODE)
		return tipc_cfg_reply_none();

	read_lock_bh(&tipc_net_lock);

	/* Get space for all unicast links + multicast link */

	payload_size = TLV_SPACE(sizeof(link_info)) *
		(tipc_net.zones[tipc_zone(tipc_own_addr)]->links + 1);
	if (payload_size > 32768u) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (too many links)");
	}
	buf = tipc_cfg_reply_alloc(payload_size);
	if (!buf) {
		read_unlock_bh(&tipc_net_lock);
		return NULL;
	}

	/* Add TLV for broadcast link */

	link_info.dest = htonl(tipc_own_addr & 0xfffff00);
	link_info.up = htonl(1);
	strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));

	/* Add TLVs for any other links in scope */

	for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
		u32 i;

		if (!tipc_in_scope(domain, n_ptr->addr))
			continue;
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			if (!n_ptr->links[i])
				continue;
			link_info.dest = htonl(n_ptr->addr);
			link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
			strcpy(link_info.str, n_ptr->links[i]->name);
			tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
					    &link_info, sizeof(link_info));
		}
		tipc_node_unlock(n_ptr);
	}

	read_unlock_bh(&tipc_net_lock);
	return buf;
}