/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "config.h"
#include "node.h"
#include "name_distr.h"

static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);

static DEFINE_SPINLOCK(node_create_lock);

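/*
 * Node table overview (added comment): each node appears both in a hash
 * table keyed by its network address, for fast lookup, and on a list
 * kept sorted by ascending address, for ordered traversal by the
 * configuration service below.
 */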
static struct hlist_head node_htable[NODE_HTABLE_SIZE];
LIST_HEAD(tipc_node_list);
static u32 tipc_num_nodes;

static atomic_t tipc_num_links = ATOMIC_INIT(0);
u32 tipc_own_tag;

/**
 * tipc_node_find - locate specified node object, if it exists
 */

struct tipc_node *tipc_node_find(u32 addr)
{
	struct tipc_node *node;
	struct hlist_node *pos;

	if (unlikely(!in_own_cluster(addr)))
		return NULL;

	hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) {
		if (node->addr == addr)
			return node;
	}
	return NULL;
}

/**
 * tipc_node_create - create neighboring node
 *
 * Currently, this routine is called by neighbor discovery code, which holds
 * net_lock for reading only.  We must take node_create_lock to ensure a node
 * isn't created twice if two different bearers discover the node at the same
 * time.  (It would be preferable to switch to holding net_lock in write mode,
 * but this is a non-trivial change.)
 */

struct tipc_node *tipc_node_create(u32 addr)
{
	struct tipc_node *n_ptr, *temp_node;

	spin_lock_bh(&node_create_lock);

	n_ptr = tipc_node_find(addr);
	if (n_ptr) {
		spin_unlock_bh(&node_create_lock);
		return n_ptr;
	}

	n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
	if (!n_ptr) {
		spin_unlock_bh(&node_create_lock);
		warn("Node creation failed, no memory\n");
		return NULL;
	}

	n_ptr->addr = addr;
	spin_lock_init(&n_ptr->lock);
	INIT_HLIST_NODE(&n_ptr->hash);
	INIT_LIST_HEAD(&n_ptr->list);
	INIT_LIST_HEAD(&n_ptr->nsub);

	hlist_add_head(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);

	list_for_each_entry(temp_node, &tipc_node_list, list) {
		if (n_ptr->addr < temp_node->addr)
			break;
	}
	list_add_tail(&n_ptr->list, &temp_node->list);
	n_ptr->block_setup = WAIT_PEER_DOWN;

	tipc_num_nodes++;

	spin_unlock_bh(&node_create_lock);
	return n_ptr;
}

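/*
 * Illustrative sketch (added; not in the original file): the calling
 * pattern that the locking comment above assumes in the neighbor
 * discovery code.  Error handling is elided and "disc_addr" is a
 * hypothetical variable name:
 *
 *	read_lock_bh(&tipc_net_lock);
 *	n_ptr = tipc_node_find(disc_addr);
 *	if (!n_ptr)
 *		n_ptr = tipc_node_create(disc_addr);
 *	if (n_ptr) {
 *		tipc_node_lock(n_ptr);
 *		(update per-node state here)
 *		tipc_node_unlock(n_ptr);
 *	}
 *	read_unlock_bh(&tipc_net_lock);
 */

/**
 * tipc_node_delete - unlink node from hash table and list, then free it
 *
 * (Added comment: callers are assumed to serialize this against other
 * users of the node table; compare node_create_lock above.)
 */
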
void tipc_node_delete(struct tipc_node *n_ptr)
{
	list_del(&n_ptr->list);
	hlist_del(&n_ptr->hash);
	kfree(n_ptr);

	tipc_num_nodes--;
}

/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */

void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr)
{
	struct link **active = &n_ptr->active_links[0];

	n_ptr->working_links++;

	info("Established link <%s> on network plane %c\n",
	     l_ptr->name, l_ptr->b_ptr->net_plane);

	if (!active[0]) {
		active[0] = active[1] = l_ptr;
		node_established_contact(n_ptr);
		return;
	}
	if (l_ptr->priority < active[0]->priority) {
		info("New link <%s> becomes standby\n", l_ptr->name);
		return;
	}
	tipc_link_send_duplicate(active[0], l_ptr);
	if (l_ptr->priority == active[0]->priority) {
		active[0] = l_ptr;
		return;
	}
	info("Old link <%s> becomes standby\n", active[0]->name);
	if (active[1] != active[0])
		info("Old link <%s> becomes standby\n", active[1]->name);
	active[0] = active[1] = l_ptr;
}

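/*
 * Note on the active_links[] pair (added explanation): both entries point
 * at the same link while a single link carries all traffic; two working
 * links of equal priority occupy one entry each, so that traffic can be
 * shared between them.
 */
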
/**
 * node_select_active_links - select which link(s) should be active
 */

static void node_select_active_links(struct tipc_node *n_ptr)
{
	struct link **active = &n_ptr->active_links[0];
	u32 i;
	u32 highest_prio = 0;

	active[0] = active[1] = NULL;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct link *l_ptr = n_ptr->links[i];

		if (!l_ptr || !tipc_link_is_up(l_ptr) ||
		    (l_ptr->priority < highest_prio))
			continue;

		if (l_ptr->priority > highest_prio) {
			highest_prio = l_ptr->priority;
			active[0] = active[1] = l_ptr;
		} else {
			active[1] = l_ptr;
		}
	}
}

/**
 * tipc_node_link_down - handle loss of link
 */

void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr)
{
	struct link **active;

	n_ptr->working_links--;

	if (!tipc_link_is_active(l_ptr)) {
		info("Lost standby link <%s> on network plane %c\n",
		     l_ptr->name, l_ptr->b_ptr->net_plane);
		return;
	}
	info("Lost link <%s> on network plane %c\n",
		l_ptr->name, l_ptr->b_ptr->net_plane);

	active = &n_ptr->active_links[0];
	if (active[0] == l_ptr)
		active[0] = active[1];
	if (active[1] == l_ptr)
		active[1] = active[0];
	if (active[0] == l_ptr)
		node_select_active_links(n_ptr);
	if (tipc_node_is_up(n_ptr))
		tipc_link_changeover(l_ptr);
	else
		node_lost_contact(n_ptr);
}

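/*
 * Link status helpers (added comments): a node counts as "up" when it has
 * at least one active link, and as "redundant" when more than one of its
 * links is working.
 */
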
int tipc_node_active_links(struct tipc_node *n_ptr)
{
	return n_ptr->active_links[0] != NULL;
}

int tipc_node_redundant_links(struct tipc_node *n_ptr)
{
	return n_ptr->working_links > 1;
}

int tipc_node_is_up(struct tipc_node *n_ptr)
{
	return tipc_node_active_links(n_ptr);
}

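/**
 * tipc_node_attach_link - register link in the node's per-bearer slot
 *
 * (Added comment: links[] is indexed by bearer identity, so a node holds
 * at most one link per bearer; the global link counter is kept in step
 * for the configuration service.)
 */
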
void tipc_node_attach_link(struct tipc_node *n_ptr, struct link *l_ptr)
{
	n_ptr->links[l_ptr->b_ptr->identity] = l_ptr;
	atomic_inc(&tipc_num_links);
	n_ptr->link_cnt++;
}

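/**
 * tipc_node_detach_link - undo tipc_node_attach_link()
 */
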
void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr)
{
	n_ptr->links[l_ptr->b_ptr->identity] = NULL;
	atomic_dec(&tipc_num_links);
	n_ptr->link_cnt--;
}

/*
 * Routing table management - five cases to handle:
 *
 * 1: A link towards a zone/cluster external node comes up.
 *    => Send a multicast message updating routing tables of all
 *    system nodes within own cluster that the new destination
 *    can be reached via this node.
 *    (node.establishedContact()=>cluster.multicastNewRoute())
 *
 * 2: A link towards a slave node comes up.
 *    => Send a multicast message updating routing tables of all
 *    system nodes within own cluster that the new destination
 *    can be reached via this node.
 *    (node.establishedContact()=>cluster.multicastNewRoute())
 *    => Send a message to the slave node about existence
 *    of all system nodes within cluster:
 *    (node.establishedContact()=>cluster.sendLocalRoutes())
 *
 * 3: A new cluster local system node becomes available.
 *    => Send message(s) to this particular node containing
 *    information about all cluster external and slave
 *    nodes which can be reached via this node.
 *    (node.establishedContact()=>network.sendExternalRoutes())
 *    (node.establishedContact()=>network.sendSlaveRoutes())
 *    => Send messages to all directly connected slave nodes
 *    containing information about the existence of the new node
 *    (node.establishedContact()=>cluster.multicastNewRoute())
 *
 * 4: The link towards a zone/cluster external node or slave
 *    node goes down.
 *    => Send a multicast message updating routing tables of all
 *    nodes within cluster that the destination can no longer
 *    be reached via this node.
 *    (node.lostAllLinks()=>cluster.bcastLostRoute())
 *
 * 5: A cluster local system node becomes unavailable.
 *    => Remove all references to this node from the local
 *    routing tables. Note: This is a completely node
 *    local operation.
 *    (node.lostAllLinks()=>network.removeAsRouter())
 *    => Send messages to all directly connected slave nodes
 *    containing information about loss of the node
 *    (node.lostAllLinks()=>cluster.multicastLostRoute())
 *
 */

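/**
 * node_established_contact - handle first working link to a peer node
 *
 * (Added comment: schedules name table distribution to the new peer and
 * initializes its broadcast link state.)
 */
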
static void node_established_contact(struct tipc_node *n_ptr)
{
	tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);

	/* Synchronize broadcast acks */
	n_ptr->bclink.acked = tipc_bclink_get_last_sent();

	if (n_ptr->bclink.supported) {
		tipc_nmap_add(&tipc_bcast_nmap, n_ptr->addr);
		if (n_ptr->addr < tipc_own_addr)
			tipc_own_tag++;
	}
}

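/**
 * node_name_purge_complete - allow node re-contact after name table purge
 *
 * (Added comment: runs as a deferred tipc_k_signal() handler and clears
 * the WAIT_NAMES_GONE flag set by node_lost_contact() below.)
 */
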
static void node_name_purge_complete(unsigned long node_addr)
{
	struct tipc_node *n_ptr;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(node_addr);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		n_ptr->block_setup &= ~WAIT_NAMES_GONE;
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);
}

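/**
 * node_lost_contact - handle loss of the last link to a peer node
 *
 * (Added comment: flushes the peer's broadcast link state, aborts any
 * link changeover, notifies subscribers, and blocks re-contact until the
 * name table purge has completed.)
 */
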
static void node_lost_contact(struct tipc_node *n_ptr)
{
	char addr_string[16];
	u32 i;

	info("Lost contact with %s\n",
	     tipc_addr_string_fill(addr_string, n_ptr->addr));

	/* Flush broadcast link info associated with lost node */

	if (n_ptr->bclink.supported) {
		n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
		while (n_ptr->bclink.deferred_head) {
			struct sk_buff *buf = n_ptr->bclink.deferred_head;
			n_ptr->bclink.deferred_head = buf->next;
			buf_discard(buf);
		}

		if (n_ptr->bclink.defragm) {
			buf_discard(n_ptr->bclink.defragm);
			n_ptr->bclink.defragm = NULL;
		}

		tipc_nmap_remove(&tipc_bcast_nmap, n_ptr->addr);
		tipc_bclink_acknowledge(n_ptr,
					mod(n_ptr->bclink.acked + 10000));
		if (n_ptr->addr < tipc_own_addr)
			tipc_own_tag--;

		n_ptr->bclink.supported = 0;
	}

	/* Abort link changeover */
	for (i = 0; i < MAX_BEARERS; i++) {
		struct link *l_ptr = n_ptr->links[i];
		if (!l_ptr)
			continue;
		l_ptr->reset_checkpoint = l_ptr->next_in_no;
		l_ptr->exp_msg_count = 0;
		tipc_link_reset_fragments(l_ptr);
	}

	/* Notify subscribers */
	tipc_nodesub_notify(n_ptr);

	/* Prevent re-contact with node until cleanup is done */

	n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE;
	tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr);
}

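/**
 * tipc_node_get_nodes - build reply listing all nodes within a domain
 *
 * (Added comment: returns a configuration reply buffer carrying one
 * TIPC_TLV_NODE_INFO TLV per node in scope of the requested domain.)
 */
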
struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct tipc_node *n_ptr;
	struct tipc_node_info node_info;
	u32 payload_size;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
	if (!tipc_addr_domain_valid(domain))
		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
						   " (network address)");

	read_lock_bh(&tipc_net_lock);
	if (!tipc_num_nodes) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_none();
	}

	/* For now, get space for all other nodes */

	payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
	if (payload_size > 32768u) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (too many nodes)");
	}
	buf = tipc_cfg_reply_alloc(payload_size);
	if (!buf) {
		read_unlock_bh(&tipc_net_lock);
		return NULL;
	}

	/* Add TLVs for all nodes in scope */

	list_for_each_entry(n_ptr, &tipc_node_list, list) {
		if (!tipc_in_scope(domain, n_ptr->addr))
			continue;
		node_info.addr = htonl(n_ptr->addr);
		node_info.up = htonl(tipc_node_is_up(n_ptr));
		tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
				    &node_info, sizeof(node_info));
	}

	read_unlock_bh(&tipc_net_lock);
	return buf;
}

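/**
 * tipc_node_get_links - build reply listing all links within a domain
 *
 * (Added comment: the broadcast link is reported first, followed by one
 * TIPC_TLV_LINK_INFO TLV per unicast link in scope.)
 */
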
struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct tipc_node *n_ptr;
	struct tipc_link_info link_info;
	u32 payload_size;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
	if (!tipc_addr_domain_valid(domain))
		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
						   " (network address)");

	if (tipc_mode != TIPC_NET_MODE)
		return tipc_cfg_reply_none();

	read_lock_bh(&tipc_net_lock);

	/* Get space for all unicast links + broadcast link */

	payload_size = TLV_SPACE(sizeof(link_info)) *
		(atomic_read(&tipc_num_links) + 1);
	if (payload_size > 32768u) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (too many links)");
	}
	buf = tipc_cfg_reply_alloc(payload_size);
	if (!buf) {
		read_unlock_bh(&tipc_net_lock);
		return NULL;
	}

	/* Add TLV for broadcast link */

	link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
	link_info.up = htonl(1);
	strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));

	/* Add TLVs for any other links in scope */

	list_for_each_entry(n_ptr, &tipc_node_list, list) {
		u32 i;

		if (!tipc_in_scope(domain, n_ptr->addr))
			continue;
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			if (!n_ptr->links[i])
				continue;
			link_info.dest = htonl(n_ptr->addr);
			link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
			strcpy(link_info.str, n_ptr->links[i]->name);
			tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
					    &link_info, sizeof(link_info));
		}
		tipc_node_unlock(n_ptr);
	}

	read_unlock_bh(&tipc_net_lock);
	return buf;
}
498