/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "monitor.h"
#include "discover.h"
#include "netlink.h"
#include "trace.h"
#include "crypto.h"

#define INVALID_NODE_SIG	0x10000	/* above any valid 16-bit signature */
#define NODE_CLEANUP_AFTER	300000	/* ms */

/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_NOTIFY_LINK_UP: notify link is up
 * TIPC_NOTIFY_LINK_DOWN: notify link is down
 */
enum {
	TIPC_NOTIFY_NODE_DOWN		= (1 << 3),
	TIPC_NOTIFY_NODE_UP		= (1 << 4),
	TIPC_NOTIFY_LINK_UP		= (1 << 6),
	TIPC_NOTIFY_LINK_DOWN		= (1 << 7)
};

struct tipc_link_entry {
	struct tipc_link *link;
	spinlock_t lock; /* per link */
	u32 mtu;
	struct sk_buff_head inputq;
	struct tipc_media_addr maddr;
};

struct tipc_bclink_entry {
	struct tipc_link *link;
	struct sk_buff_head inputq1;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq2;
	struct sk_buff_head namedq;
	u16 named_rcv_nxt;
	bool named_open;
};

/**
 * struct tipc_node - TIPC node structure
 * @addr: network address of node
 * @kref: reference counter to node object
 * @lock: rwlock governing access to structure
 * @net: the applicable net namespace
 * @hash: links to adjacent nodes in unsorted hash chain
 * @active_links: bearer ids of active links, used as index into links[] array
 * @links: array containing references to all links to node
 * @bc_entry: broadcast link entry
 * @action_flags: bit mask of different types of node actions
 * @state: connectivity state vs peer node
 * @preliminary: a preliminary node or not
 * @failover_sent: failover sent or not
 * @sync_point: sequence number where synch/failover is finished
 * @list: links to adjacent nodes in sorted list of cluster's nodes
 * @working_links: number of working links to node (both active and standby)
 * @link_cnt: number of links to node
 * @capabilities: bitmap, indicating peer node's functional capabilities
 * @signature: node instance identifier
 * @link_id: local and remote bearer ids of changing link, if any
 * @peer_id: 128-bit ID of peer
 * @peer_id_string: ID string of peer
 * @publ_list: list of publications
 * @conn_sks: list of connections (FIXME)
 * @timer: node's keepalive timer
 * @keepalive_intv: keepalive interval in milliseconds
 * @rcu: rcu struct for tipc_node
 * @delete_at: indicates the time for deleting a down node
 * @peer_net: peer's net namespace
 * @peer_hash_mix: hash for this peer (FIXME)
 * @crypto_rx: RX crypto handler
 */
struct tipc_node {
	u32 addr;
	struct kref kref;
	rwlock_t lock;
	struct net *net;
	struct hlist_node hash;
	int active_links[2];
	struct tipc_link_entry links[MAX_BEARERS];
	struct tipc_bclink_entry bc_entry;
	int action_flags;
	struct list_head list;
	int state;
	bool preliminary;
	bool failover_sent;
	u16 sync_point;
	int link_cnt;
	u16 working_links;
	u16 capabilities;
	u32 signature;
	u32 link_id;
	u8 peer_id[16];
	char peer_id_string[NODE_ID_STR_LEN];
	struct list_head publ_list;
	struct list_head conn_sks;
	unsigned long keepalive_intv;
	struct timer_list timer;
	struct rcu_head rcu;
	unsigned long delete_at;
	struct net *peer_net;
	u32 peer_hash_mix;
#ifdef CONFIG_TIPC_CRYPTO
	struct tipc_crypto *crypto_rx;
#endif
};

/* Node FSM states and events:
 */
enum {
	SELF_DOWN_PEER_DOWN    = 0xdd,
	SELF_UP_PEER_UP        = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING    = 0xac,
	SELF_COMING_PEER_UP    = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER       = 0xf0,
	NODE_SYNCHING          = 0xcc
};

enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT   = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT   = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT   = 0xfee,
	NODE_SYNCH_BEGIN_EVT    = 0xcbe,
	NODE_SYNCH_END_EVT      = 0xcee
};
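
/* The hex values above appear to be mnemonics: 'd' = down, 'a' = up,
 * 'c' = coming, '1' = leaving, 'f0' = failingover, 'cc' = synching, and
 * "...ece"/"...1ce" echo "establish/lose contact". This reading is not
 * stated in the source, but it makes the FSM trace output easier to follow.
 */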

static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr);
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
				bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
static bool node_is_up(struct tipc_node *n);
static void tipc_node_delete_from_list(struct tipc_node *node);

struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};

static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
	int bearer_id = n->active_links[sel & 1];

	if (unlikely(bearer_id == INVALID_BEARER_ID))
		return NULL;

	return n->links[bearer_id].link;
}

int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
{
	struct tipc_node *n;
	int bearer_id;
	unsigned int mtu = MAX_MSG_SIZE;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return mtu;

	/* Allow MAX_MSG_SIZE when building a connection-oriented message
	 * if both nodes are in the same core network
	 */
	if (n->peer_net && connected) {
		tipc_node_put(n);
		return mtu;
	}

	bearer_id = n->active_links[sel & 1];
	if (likely(bearer_id != INVALID_BEARER_ID))
		mtu = n->links[bearer_id].mtu;
	tipc_node_put(n);
	return mtu;
}

bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
{
	u8 *own_id = tipc_own_id(net);
	struct tipc_node *n;

	if (!own_id)
		return true;

	if (addr == tipc_own_addr(net)) {
		memcpy(id, own_id, TIPC_NODEID_LEN);
		return true;
	}
	n = tipc_node_find(net, addr);
	if (!n)
		return false;

	memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
	tipc_node_put(n);
	return true;
}

u16 tipc_node_get_capabilities(struct net *net, u32 addr)
{
	struct tipc_node *n;
	u16 caps;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return TIPC_NODE_CAPABILITIES;
	caps = n->capabilities;
	tipc_node_put(n);
	return caps;
}

u32 tipc_node_get_addr(struct tipc_node *node)
{
	return (node) ? node->addr : 0;
}

char *tipc_node_get_id_str(struct tipc_node *node)
{
	return node->peer_id_string;
}

#ifdef CONFIG_TIPC_CRYPTO
/**
 * tipc_node_crypto_rx - Retrieve crypto RX handle from node
 * @__n: target tipc_node
 * Note: node ref counter must be held first!
 */
struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
{
	return (__n) ? __n->crypto_rx : NULL;
}

struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
{
	return container_of(pos, struct tipc_node, list)->crypto_rx;
}

struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	n = tipc_node_find(net, addr);
	return (n) ? n->crypto_rx : NULL;
}
#endif

static void tipc_node_free(struct rcu_head *rp)
{
	struct tipc_node *n = container_of(rp, struct tipc_node, rcu);

#ifdef CONFIG_TIPC_CRYPTO
	tipc_crypto_stop(&n->crypto_rx);
#endif
	kfree(n);
}

static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *n = container_of(kref, struct tipc_node, kref);

	kfree(n->bc_entry.link);
	call_rcu(&n->rcu, tipc_node_free);
}

void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node;
	unsigned int thash = tipc_hashfn(addr);

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
		if (node->addr != addr || node->preliminary)
			continue;
		if (!kref_get_unless_zero(&node->kref))
			node = NULL;
		break;
	}
	rcu_read_unlock();
	return node;
}
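
/* Usage sketch (illustrative, mirroring the callers in this file): a
 * successful lookup takes a reference via kref_get_unless_zero(), so
 * every hit must be balanced with tipc_node_put():
 *
 *	n = tipc_node_find(net, addr);
 *	if (!n)
 *		return -EHOSTUNREACH;
 *	...use n...
 *	tipc_node_put(n);
 */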

/* tipc_node_find_by_id - locate specified node object by its 128-bit id
 * Note: this function is called only when a discovery request failed
 * to find the node by its 32-bit id, and is not time critical
 */
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		read_lock_bh(&n->lock);
		if (!memcmp(id, n->peer_id, 16) &&
		    kref_get_unless_zero(&n->kref))
			found = true;
		read_unlock_bh(&n->lock);
		if (found)
			break;
	}
	rcu_read_unlock();
	return found ? n : NULL;
}

static void tipc_node_read_lock(struct tipc_node *n)
	__acquires(n->lock)
{
	read_lock_bh(&n->lock);
}

static void tipc_node_read_unlock(struct tipc_node *n)
	__releases(n->lock)
{
	read_unlock_bh(&n->lock);
}

static void tipc_node_write_lock(struct tipc_node *n)
	__acquires(n->lock)
{
	write_lock_bh(&n->lock);
}

static void tipc_node_write_unlock_fast(struct tipc_node *n)
	__releases(n->lock)
{
	write_unlock_bh(&n->lock);
}

static void tipc_node_write_unlock(struct tipc_node *n)
	__releases(n->lock)
{
	struct tipc_socket_addr sk;
	struct net *net = n->net;
	u32 flags = n->action_flags;
	struct list_head *publ_list;
	struct tipc_uaddr ua;
	u32 bearer_id, node;

	if (likely(!flags)) {
		write_unlock_bh(&n->lock);
		return;
	}

	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
		   TIPC_LINK_STATE, n->addr, n->addr);
	sk.ref = n->link_id;
	sk.node = tipc_own_addr(net);
	node = n->addr;
	bearer_id = n->link_id & 0xffff;
	publ_list = &n->publ_list;

	n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
			     TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

	write_unlock_bh(&n->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, node, n->capabilities);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, node, n->capabilities);

	if (flags & TIPC_NOTIFY_LINK_UP) {
		tipc_mon_peer_up(net, node, bearer_id);
		tipc_nametbl_publish(net, &ua, &sk, sk.ref);
	}
	if (flags & TIPC_NOTIFY_LINK_DOWN) {
		tipc_mon_peer_down(net, node, bearer_id);
		tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
	}
}
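
/* Usage sketch (illustrative): callers set action flags while holding the
 * write lock and let the unlock helper above deliver the notifications
 * with the lock already dropped, so the name table and monitor are never
 * called under n->lock:
 *
 *	tipc_node_write_lock(n);
 *	n->action_flags |= TIPC_NOTIFY_LINK_UP;
 *	n->link_id = tipc_link_id(nl);
 *	tipc_node_write_unlock(n);
 */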

static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
{
	int net_id = tipc_netid(n->net);
	struct tipc_net *tn_peer;
	struct net *tmp;
	u32 hash_chk;

	if (n->peer_net)
		return;

	for_each_net_rcu(tmp) {
		tn_peer = tipc_net(tmp);
		if (!tn_peer)
			continue;
		/* Integrity check: is this the namespace the node lives in? */
		if (tn_peer->net_id != net_id)
			continue;
		if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
			continue;
		hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
		if (hash_mixes ^ hash_chk)
			continue;
		n->peer_net = tmp;
		n->peer_hash_mix = hash_mixes;
		break;
	}
}

struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
				   u16 capabilities, u32 hash_mixes,
				   bool preliminary)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l, *snd_l = tipc_bc_sndlink(net);
	struct tipc_node *n, *temp_node;
	unsigned long intv;
	int bearer_id;
	int i;

	spin_lock_bh(&tn->node_list_lock);
	n = tipc_node_find(net, addr) ?:
		tipc_node_find_by_id(net, peer_id);
	if (n) {
		if (!n->preliminary)
			goto update;
		if (preliminary)
			goto exit;
		/* A preliminary node becomes "real" now, refresh its data */
		tipc_node_write_lock(n);
		if (!tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
					 tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
					 n->capabilities, &n->bc_entry.inputq1,
					 &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
			pr_warn("Broadcast rcv link refresh failed, no memory\n");
			tipc_node_write_unlock_fast(n);
			tipc_node_put(n);
			n = NULL;
			goto exit;
		}
		n->preliminary = false;
		n->addr = addr;
		hlist_del_rcu(&n->hash);
		hlist_add_head_rcu(&n->hash,
				   &tn->node_htable[tipc_hashfn(addr)]);
		list_del_rcu(&n->list);
		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
			if (n->addr < temp_node->addr)
				break;
		}
		list_add_tail_rcu(&n->list, &temp_node->list);
		tipc_node_write_unlock_fast(n);

update:
		if (n->peer_hash_mix ^ hash_mixes)
			tipc_node_assign_peer_net(n, hash_mixes);
		if (n->capabilities == capabilities)
			goto exit;
		/* Same node may come back with new capabilities */
		tipc_node_write_lock(n);
		n->capabilities = capabilities;
		for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
			l = n->links[bearer_id].link;
			if (l)
				tipc_link_update_caps(l, capabilities);
		}
		tipc_node_write_unlock_fast(n);

		/* Calculate cluster capabilities */
		tn->capabilities = TIPC_NODE_CAPABILITIES;
		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
			tn->capabilities &= temp_node->capabilities;
		}

		tipc_bcast_toggle_rcast(net,
					(tn->capabilities & TIPC_BCAST_RCAST));

		goto exit;
	}
	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	tipc_nodeid2string(n->peer_id_string, peer_id);
#ifdef CONFIG_TIPC_CRYPTO
	if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
		pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
		kfree(n);
		n = NULL;
		goto exit;
	}
#endif
	n->addr = addr;
	n->preliminary = preliminary;
	memcpy(&n->peer_id, peer_id, 16);
	n->net = net;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;
	/* Assign the kernel-local peer namespace, if one exists */
	tipc_node_assign_peer_net(n, hash_mixes);
	n->capabilities = capabilities;
	kref_init(&n->kref);
	rwlock_init(&n->lock);
	INIT_HLIST_NODE(&n->hash);
	INIT_LIST_HEAD(&n->list);
	INIT_LIST_HEAD(&n->publ_list);
	INIT_LIST_HEAD(&n->conn_sks);
	skb_queue_head_init(&n->bc_entry.namedq);
	skb_queue_head_init(&n->bc_entry.inputq1);
	__skb_queue_head_init(&n->bc_entry.arrvq);
	skb_queue_head_init(&n->bc_entry.inputq2);
	for (i = 0; i < MAX_BEARERS; i++)
		spin_lock_init(&n->links[i].lock);
	n->state = SELF_DOWN_PEER_LEAVING;
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	n->signature = INVALID_NODE_SIG;
	n->active_links[0] = INVALID_BEARER_ID;
	n->active_links[1] = INVALID_BEARER_ID;
	if (!preliminary &&
	    !tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
				 tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
				 n->capabilities, &n->bc_entry.inputq1,
				 &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
		pr_warn("Broadcast rcv link creation failed, no memory\n");
		tipc_node_put(n);
		n = NULL;
		goto exit;
	}
	tipc_node_get(n);
	timer_setup(&n->timer, tipc_node_timeout, 0);
	/* Start a slow timer anyway, crypto needs it */
	n->keepalive_intv = 10000;
	intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
	if (!mod_timer(&n->timer, intv))
		tipc_node_get(n);
	hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n->list, &temp_node->list);
	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
	trace_tipc_node_create(n, true, " ");
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n;
}

static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = tipc_link_tolerance(l);
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	/* Link with lowest tolerance determines timer interval */
	if (intv < n->keepalive_intv)
		n->keepalive_intv = intv;

	/* Ensure link's abort limit corresponds to current tolerance */
	tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}
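
/* Worked example: with a link tolerance of 1500 ms,
 * intv = min(1500 / 4, 500) = 375 ms; if that is the lowest value seen,
 * keepalive_intv becomes 375 and the abort limit is set to
 * 1500 / 375 = 4 probe intervals.
 */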

static void tipc_node_delete_from_list(struct tipc_node *node)
{
#ifdef CONFIG_TIPC_CRYPTO
	tipc_crypto_key_flush(node->crypto_rx);
#endif
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	tipc_node_put(node);
}

static void tipc_node_delete(struct tipc_node *node)
{
	trace_tipc_node_delete(node, true, " ");
	tipc_node_delete_from_list(node);

	del_timer_sync(&node->timer);
	tipc_node_put(node);
}

void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
		tipc_node_delete(node);
	spin_unlock_bh(&tn->node_list_lock);
}

void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_add_tail(subscr, &n->publ_list);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_del_init(subscr);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_write_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_write_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_write_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_write_unlock(node);
	tipc_node_put(node);
}

static void tipc_node_clear_links(struct tipc_node *node)
{
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link_entry *le = &node->links[i];

		if (le->link) {
			kfree(le->link);
			le->link = NULL;
			node->link_cnt--;
		}
	}
}

/* tipc_node_cleanup - delete a node that has had no active
 * links for NODE_CLEANUP_AFTER ms
 */
static bool tipc_node_cleanup(struct tipc_node *peer)
{
	struct tipc_node *temp_node;
	struct tipc_net *tn = tipc_net(peer->net);
	bool deleted = false;

	/* If lock held by tipc_node_stop() the node will be deleted anyway */
	if (!spin_trylock_bh(&tn->node_list_lock))
		return false;

	tipc_node_write_lock(peer);

	if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
		tipc_node_clear_links(peer);
		tipc_node_delete_from_list(peer);
		deleted = true;
	}
	tipc_node_write_unlock(peer);

	if (!deleted) {
		spin_unlock_bh(&tn->node_list_lock);
		return deleted;
	}

	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(peer->net,
				(tn->capabilities & TIPC_BCAST_RCAST));
	spin_unlock_bh(&tn->node_list_lock);
	return deleted;
}

/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(struct timer_list *t)
{
	struct tipc_node *n = from_timer(n, t, timer);
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int remains = n->link_cnt;
	int bearer_id;
	int rc = 0;

	trace_tipc_node_timeout(n, false, " ");
	if (!node_is_up(n) && tipc_node_cleanup(n)) {
		/* Drop the timer's reference to the node */
		tipc_node_put(n);
		return;
	}

#ifdef CONFIG_TIPC_CRYPTO
	/* Take any crypto key related actions first */
	tipc_crypto_timeout(n->crypto_rx);
#endif
	__skb_queue_head_init(&xmitq);

	/* Reset the node interval to a large value (10 seconds); it is
	 * recalculated below from the lowest link tolerance
	 */
	tipc_node_read_lock(n);
	n->keepalive_intv = 10000;
	tipc_node_read_unlock(n);
	for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		if (le->link) {
			spin_lock_bh(&le->lock);
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
			spin_unlock_bh(&le->lock);
			remains--;
		}
		tipc_node_read_unlock(n);
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}

/**
 * __tipc_node_link_up - handle addition of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer
 * @xmitq: queue for messages to be xmited on
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
				struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	if (!nl || tipc_link_is_up(nl))
		return;

	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
	if (!tipc_link_is_up(nl))
		return;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = tipc_link_id(nl);

	/* Leave room for tunnel header when returning 'mtu' to users: */
	n->links[bearer_id].mtu = tipc_link_mss(nl);

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

	pr_debug("Established link <%s> on network plane %c\n",
		 tipc_link_name(nl), tipc_link_plane(nl));
	trace_tipc_node_link_up(n, true, " ");

	/* Ensure that a STATE message goes first */
	tipc_link_build_state_msg(nl, xmitq);

	/* First link? => give it both slots */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
		n->action_flags |= TIPC_NOTIFY_NODE_UP;
		tipc_link_set_active(nl, true);
		tipc_bcast_add_peer(n->net, nl, xmitq);
		return;
	}

	/* Second link => redistribute slots */
	if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
		pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_link_set_active(nl, true);
		tipc_link_set_active(ol, false);
	} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
		tipc_link_set_active(nl, true);
		*slot1 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
	}

	/* Prepare synchronization with first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}
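
/* Slot outcomes at a glance (illustrative summary of the code above):
 *
 *	first link up        -> slot0 = slot1 = new link
 *	new prio > old prio  -> new link takes both slots, old goes standby
 *	new prio == old prio -> slot1 = new link (traffic is shared)
 *	new prio < old prio  -> new link stays standby
 */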

/**
 * tipc_node_link_up - handle addition of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer
 * @xmitq: queue for messages to be xmited on
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	struct tipc_media_addr *maddr;

	tipc_node_write_lock(n);
	__tipc_node_link_up(n, bearer_id, xmitq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
	tipc_node_write_unlock(n);
}

/**
 * tipc_node_link_failover() - start failover in the "half-failover" case
 *
 * This function is only called in a very special situation where link
 * failover may already have started on the peer node, but not on this
 * node. This can happen when e.g.::
 *
 *	1. Both links <1A-2A>, <1B-2B> down
 *	2. Link endpoint 2A up, but 1A still down (e.g. due to network
 *	disturbance, wrong session, etc.)
 *	3. Link <1B-2B> up
 *	4. Link endpoint 2A down (e.g. due to link tolerance timeout)
 *	5. Node 2 starts failover onto link <1B-2B>
 *
 *	==> Node 1 never starts link/node failover!
 *
 * @n: tipc node structure
 * @l: link peer endpoint failing over (may be NULL)
 * @tnl: tunnel link
 * @xmitq: queue for messages to be xmited on tnl link later
 */
static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
				    struct tipc_link *tnl,
				    struct sk_buff_head *xmitq)
{
	/* Avoid a "self-failover" that could never end */
	if (!tipc_link_is_up(tnl))
		return;

	/* Don't rush; the failed link may still be resetting */
	if (l && !tipc_link_is_reset(l))
		return;

	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);

	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_failover_prepare(l, tnl, xmitq);

	if (l)
		tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
}

/**
 * __tipc_node_link_down - handle loss of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer
 * @xmitq: queue for messages to be xmited on
 * @maddr: output media address of the bearer
 */
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr)
{
	struct tipc_link_entry *le = &n->links[*bearer_id];
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0, prio;
	struct tipc_link *l, *_l, *tnl;

	l = n->links[*bearer_id].link;
	if (!l || tipc_link_is_reset(l))
		return;

	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = tipc_link_id(l);

	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 tipc_link_name(l), tipc_link_plane(l));

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		prio = tipc_link_prio(_l);
		if (prio < highest)
			continue;
		if (prio > highest) {
			highest = prio;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	if (!node_is_up(n)) {
		if (tipc_link_peer_is_down(l))
			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		tipc_link_reset(l);
		tipc_link_build_reset_msg(l, xmitq);
		*maddr = &n->links[*bearer_id].maddr;
		node_lost_contact(n, &le->inputq);
		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
		return;
	}
	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);

	/* There is still a working link => initiate failover */
	*bearer_id = n->active_links[0];
	tnl = n->links[*bearer_id].link;
	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
	tipc_link_reset(l);
	tipc_link_fsm_evt(l, LINK_RESET_EVT);
	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
	*maddr = &n->links[*bearer_id].maddr;
}

static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct tipc_media_addr *maddr = NULL;
	struct tipc_link *l = le->link;
	int old_bearer_id = bearer_id;
	struct sk_buff_head xmitq;

	if (!l)
		return;

	__skb_queue_head_init(&xmitq);

	tipc_node_write_lock(n);
	if (!tipc_link_is_establishing(l)) {
		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
	} else {
		/* Defuse pending tipc_node_link_up() */
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
	}
	if (delete) {
		kfree(l);
		le->link = NULL;
		n->link_cnt--;
	}
	trace_tipc_node_link_down(n, true, "node link down or deleted!");
	tipc_node_write_unlock(n);
	if (delete)
		tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
	tipc_sk_rcv(n->net, &le->inputq);
}

static bool node_is_up(struct tipc_node *n)
{
	return n->active_links[0] != INVALID_BEARER_ID;
}

bool tipc_node_is_up(struct net *net, u32 addr)
{
	struct tipc_node *n;
	bool retval = false;

	if (in_own_node(net, addr))
		return true;

	n = tipc_node_find(net, addr);
	if (!n)
		return false;
	retval = node_is_up(n);
	tipc_node_put(n);
	return retval;
}

static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	addr ^= tipc_net(net)->random;
	while ((n = tipc_node_find(net, addr))) {
		tipc_node_put(n);
		addr++;
	}
	return addr;
}

/* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
 * Returns suggested address if any, otherwise 0
 */
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool preliminary;
	u32 sugg_addr;

	/* Suggest new address if some other peer is using this one */
	n = tipc_node_find(net, addr);
	if (n) {
		if (!memcmp(n->peer_id, id, NODE_ID_LEN))
			addr = 0;
		tipc_node_put(n);
		if (!addr)
			return 0;
		return tipc_node_suggest_addr(net, addr);
	}

	/* Suggest previously used address if peer is known */
	n = tipc_node_find_by_id(net, id);
	if (n) {
		sugg_addr = n->addr;
		preliminary = n->preliminary;
		tipc_node_put(n);
		if (!preliminary)
			return sugg_addr;
	}

	/* Even this node may be in conflict */
	if (tn->trial_addr == addr)
		return tipc_node_suggest_addr(net, addr);

	return 0;
}

void tipc_node_check_dest(struct net *net, u32 addr,
			  u8 *peer_id, struct tipc_bearer *b,
			  u16 capabilities, u32 signature, u32 hash_mixes,
			  struct tipc_media_addr *maddr,
			  bool *respond, bool *dupl_addr)
{
	struct tipc_node *n;
	struct tipc_link *l;
	struct tipc_link_entry *le;
	bool addr_match = false;
	bool sign_match = false;
	bool link_up = false;
	bool link_is_reset = false;
	bool accept_addr = false;
	bool reset = false;
	char *if_name;
	unsigned long intv;
	u16 session;

	*dupl_addr = false;
	*respond = false;

	n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
			     false);
	if (!n)
		return;

	tipc_node_write_lock(n);

	le = &n->links[b->identity];

	/* Prepare to validate requesting node's signature and media address */
	l = le->link;
	link_up = l && tipc_link_is_up(l);
	link_is_reset = l && tipc_link_is_reset(l);
	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
	sign_match = (signature == n->signature);

	/* These three flags give us eight combinations: */

	if (sign_match && addr_match && link_up) {
		/* All is fine. Ignore requests. */
		/* Peer node is not a container/local namespace */
		if (!n->peer_hash_mix)
			n->peer_hash_mix = hash_mixes;
	} else if (sign_match && addr_match && !link_up) {
		/* Respond. The link will come up in due time */
		*respond = true;
	} else if (sign_match && !addr_match && link_up) {
		/* Peer has changed i/f address without rebooting.
		 * If so, the link will reset soon, and the next
		 * discovery will be accepted. So we can ignore it.
		 * It may also be a cloned or malicious peer having
		 * chosen the same node address and signature as an
		 * existing one.
		 * Ignore requests until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (sign_match && !addr_match && !link_up) {
		/* Peer link has changed i/f address without rebooting.
		 * It may also be a cloned or malicious peer; we can't
		 * distinguish between the two.
		 * The signature is correct, so we must accept.
		 */
		accept_addr = true;
		*respond = true;
		reset = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Peer node rebooted. Two possibilities:
		 *  - Delayed re-discovery; this link endpoint has already
		 *    reset and re-established contact with the peer, before
		 *    receiving a discovery message from that node.
		 *    (The peer happened to receive one from this node first).
		 *  - The peer came back so fast that our side has not
		 *    discovered it yet. Probing from this side will soon
		 *    reset the link, since there can be no working link
		 *    endpoint at the peer end, and the link will re-establish.
		 *  Accept the signature, since it comes from a known peer.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/*  The peer node has rebooted.
		 *  Accept signature, since it is a known peer.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Peer rebooted with new address, or a new/duplicate peer.
		 * Ignore until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with new address, or it is a new peer.
		 * Accept signature and address.
		 */
		n->signature = signature;
		accept_addr = true;
		*respond = true;
		reset = true;
	}

	if (!accept_addr)
		goto exit;

	/* Now create new link if not already existing */
	if (!l) {
		if (n->link_cnt == 2)
			goto exit;

		if_name = strchr(b->name, ':') + 1;
		get_random_bytes(&session, sizeof(u16));
		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
				      b->net_plane, b->mtu, b->priority,
				      b->min_win, b->max_win, session,
				      tipc_own_addr(net), addr, peer_id,
				      n->capabilities,
				      tipc_bc_sndlink(n->net), n->bc_entry.link,
				      &le->inputq,
				      &n->bc_entry.namedq, &l)) {
			*respond = false;
			goto exit;
		}
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		if (n->state == NODE_FAILINGOVER)
			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
		link_is_reset = tipc_link_is_reset(l);
		le->link = l;
		n->link_cnt++;
		tipc_node_calculate_timer(n, l);
		if (n->link_cnt == 1) {
			intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
			if (!mod_timer(&n->timer, intv))
				tipc_node_get(n);
		}
	}
	memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
	tipc_node_write_unlock(n);
	if (reset && !link_is_reset)
		tipc_node_link_down(n, b->identity, false);
	tipc_node_put(n);
}

void tipc_node_delete_links(struct net *net, int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_link_down(n, bearer_id, true);
	}
	rcu_read_unlock();
}

static void tipc_node_reset_links(struct tipc_node *n)
{
	int i;

	pr_warn("Resetting all links to %x\n", n->addr);

	trace_tipc_node_reset_links(n, true, " ");
	for (i = 0; i < MAX_BEARERS; i++) {
		tipc_node_link_down(n, i, false);
	}
}

/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
	n->state = state;
	return;

illegal_evt:
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
}
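
/* Example transition chain (read off the tables above): a normal link
 * establishment walks
 *
 *	SELF_DOWN_PEER_DOWN --SELF_ESTABL_CONTACT_EVT--> SELF_UP_PEER_COMING
 *	SELF_UP_PEER_COMING --PEER_ESTABL_CONTACT_EVT--> SELF_UP_PEER_UP
 */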

static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %x\n", n->addr);
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	trace_tipc_node_lost_contact(n, true, " ");

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);
	skb_queue_purge(&n->bc_entry.namedq);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;
	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}

/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of @linkname output buffer
 *
 * Return: 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_read_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		strncpy(linkname, tipc_link_name(link), len);
		err = 0;
	}
	tipc_node_read_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	struct sk_buff_head inputq;

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (msg_connected(hdr) || msg_named(hdr) ||
		    msg_direct(hdr)) {
			tipc_loopback_trace(peer_net, list);
			spin_lock_init(&list->lock);
			tipc_sk_rcv(peer_net, list);
			return;
		}
		if (msg_mcast(hdr)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
			return;
		}
		return;
	case MSG_FRAGMENTER:
		if (tipc_msg_assemble(list)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
		}
		return;
	case GROUP_PROTOCOL:
	case CONN_MANAGER:
		tipc_loopback_trace(peer_net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(peer_net, list);
		return;
	case LINK_PROTOCOL:
	case NAME_DISTRIBUTOR:
	case TUNNEL_PROTOCOL:
	case BCAST_PROTOCOL:
		return;
	default:
		return;
	}
}

/**
 * tipc_node_xmit() - general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain.
 * Return: 0 on success; otherwise -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE or
 * -ENOBUFS
 */
1689  int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
1690  		   u32 dnode, int selector)
1691  {
1692  	struct tipc_link_entry *le = NULL;
1693  	struct tipc_node *n;
1694  	struct sk_buff_head xmitq;
1695  	bool node_up = false;
1696  	struct net *peer_net;
1697  	int bearer_id;
1698  	int rc;
1699  
1700  	if (in_own_node(net, dnode)) {
1701  		tipc_loopback_trace(net, list);
1702  		spin_lock_init(&list->lock);
1703  		tipc_sk_rcv(net, list);
1704  		return 0;
1705  	}
1706  
1707  	n = tipc_node_find(net, dnode);
1708  	if (unlikely(!n)) {
1709  		__skb_queue_purge(list);
1710  		return -EHOSTUNREACH;
1711  	}
1712  
1713  	rcu_read_lock();
1714  	tipc_node_read_lock(n);
1715  	node_up = node_is_up(n);
1716  	peer_net = n->peer_net;
1717  	tipc_node_read_unlock(n);
1718  	if (node_up && peer_net && check_net(peer_net)) {
1719  		/* xmit inner linux container */
1720  		tipc_lxc_xmit(peer_net, list);
1721  		if (likely(skb_queue_empty(list))) {
1722  			rcu_read_unlock();
1723  			tipc_node_put(n);
1724  			return 0;
1725  		}
1726  	}
1727  	rcu_read_unlock();
1728  
1729  	tipc_node_read_lock(n);
1730  	bearer_id = n->active_links[selector & 1];
1731  	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
1732  		tipc_node_read_unlock(n);
1733  		tipc_node_put(n);
1734  		__skb_queue_purge(list);
1735  		return -EHOSTUNREACH;
1736  	}
1737  
1738  	__skb_queue_head_init(&xmitq);
1739  	le = &n->links[bearer_id];
1740  	spin_lock_bh(&le->lock);
1741  	rc = tipc_link_xmit(le->link, list, &xmitq);
1742  	spin_unlock_bh(&le->lock);
1743  	tipc_node_read_unlock(n);
1744  
1745  	if (unlikely(rc == -ENOBUFS))
1746  		tipc_node_link_down(n, bearer_id, false);
1747  	else
1748  		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1749  
1750  	tipc_node_put(n);
1751  
1752  	return rc;
1753  }
1754  
1755  /* tipc_node_xmit_skb(): send single buffer to destination
1756   * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
1757   * messages, which will not be rejected
1758   * The only exception is datagram messages rerouted after secondary
1759   * lookup, which are rare and safe to dispose of anyway.
1760   */
1761  int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
1762  		       u32 selector)
1763  {
1764  	struct sk_buff_head head;
1765  
1766  	__skb_queue_head_init(&head);
1767  	__skb_queue_tail(&head, skb);
1768  	tipc_node_xmit(net, &head, dnode, selector);
1769  	return 0;
1770  }
1771  
1772  /* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
1773   * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
1774   */
1775  int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
1776  {
1777  	struct sk_buff *skb;
1778  	u32 selector, dnode;
1779  
1780  	while ((skb = __skb_dequeue(xmitq))) {
1781  		selector = msg_origport(buf_msg(skb));
1782  		dnode = msg_destnode(buf_msg(skb));
1783  		tipc_node_xmit_skb(net, skb, dnode, selector);
1784  	}
1785  	return 0;
1786  }
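
/* Note: each buffer on 'xmitq' carries its own destination node and
 * originating port, so the loop above fans one queue out to any number
 * of peers, one tipc_node_xmit_skb() call per buffer.
 */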
1787  
1788  void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests)
1789  {
1790  	struct sk_buff_head xmitq;
1791  	struct sk_buff *txskb;
1792  	struct tipc_node *n;
1793  	u16 dummy;
1794  	u32 dst;
1795  
1796  	/* Use broadcast if all nodes support it */
1797  	if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) {
1798  		__skb_queue_head_init(&xmitq);
1799  		__skb_queue_tail(&xmitq, skb);
1800  		tipc_bcast_xmit(net, &xmitq, &dummy);
1801  		return;
1802  	}
1803  
1804  	/* Otherwise use legacy replicast method */
1805  	rcu_read_lock();
1806  	list_for_each_entry_rcu(n, tipc_nodes(net), list) {
1807  		dst = n->addr;
1808  		if (in_own_node(net, dst))
1809  			continue;
1810  		if (!node_is_up(n))
1811  			continue;
1812  		txskb = pskb_copy(skb, GFP_ATOMIC);
1813  		if (!txskb)
1814  			break;
1815  		msg_set_destnode(buf_msg(txskb), dst);
1816  		tipc_node_xmit_skb(net, txskb, dst, 0);
1817  	}
1818  	rcu_read_unlock();
1819  	kfree_skb(skb);
1820  }
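
/* Note on the two paths above: true broadcast hands the original buffer
 * to tipc_bcast_xmit() and sends once; the replicast fallback clones the
 * buffer with pskb_copy(GFP_ATOMIC) for every peer that is up, unicasts
 * each clone, and frees the original afterwards.
 */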
1821  
1822  static void tipc_node_mcast_rcv(struct tipc_node *n)
1823  {
1824  	struct tipc_bclink_entry *be = &n->bc_entry;
1825  
1826  	/* 'arrvq' is under inputq2's lock protection */
1827  	spin_lock_bh(&be->inputq2.lock);
1828  	spin_lock_bh(&be->inputq1.lock);
1829  	skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
1830  	spin_unlock_bh(&be->inputq1.lock);
1831  	spin_unlock_bh(&be->inputq2.lock);
1832  	tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
1833  }
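
/* Note on the lock nesting above: since 'arrvq' is protected by
 * inputq2's lock, taking inputq2.lock before inputq1.lock makes the
 * splice from inputq1 into arrvq appear atomic to tipc_sk_mcast_rcv(),
 * which drains arrvq under that same lock.
 */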
1834  
1835  static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
1836  				  int bearer_id, struct sk_buff_head *xmitq)
1837  {
1838  	struct tipc_link *ucl;
1839  	int rc;
1840  
1841  	rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);
1842  
1843  	if (rc & TIPC_LINK_DOWN_EVT) {
1844  		tipc_node_reset_links(n);
1845  		return;
1846  	}
1847  
1848  	if (!(rc & TIPC_LINK_SND_STATE))
1849  		return;
1850  
1851  	/* If probe message, a STATE response will be sent anyway */
1852  	if (msg_probe(hdr))
1853  		return;
1854  
1855  	/* Produce a STATE message carrying broadcast NACK */
1856  	tipc_node_read_lock(n);
1857  	ucl = n->links[bearer_id].link;
1858  	if (ucl)
1859  		tipc_link_build_state_msg(ucl, xmitq);
1860  	tipc_node_read_unlock(n);
1861  }
1862  
1863  /**
1864   * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
1865   * @net: the applicable net namespace
1866   * @skb: TIPC packet
1867   * @bearer_id: identity of the bearer the message arrived on
1868   *
1869   * Invoked with no locks held.
1870   */
1871  static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
1872  {
1873  	int rc;
1874  	struct sk_buff_head xmitq;
1875  	struct tipc_bclink_entry *be;
1876  	struct tipc_link_entry *le;
1877  	struct tipc_msg *hdr = buf_msg(skb);
1878  	int usr = msg_user(hdr);
1879  	u32 dnode = msg_destnode(hdr);
1880  	struct tipc_node *n;
1881  
1882  	__skb_queue_head_init(&xmitq);
1883  
1884  	/* If NACK for other node, let rcv link for that node peek into it */
1885  	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
1886  		n = tipc_node_find(net, dnode);
1887  	else
1888  		n = tipc_node_find(net, msg_prevnode(hdr));
1889  	if (!n) {
1890  		kfree_skb(skb);
1891  		return;
1892  	}
1893  	be = &n->bc_entry;
1894  	le = &n->links[bearer_id];
1895  
1896  	rc = tipc_bcast_rcv(net, be->link, skb);
1897  
1898  	/* Broadcast ACKs are sent on a unicast link */
1899  	if (rc & TIPC_LINK_SND_STATE) {
1900  		tipc_node_read_lock(n);
1901  		tipc_link_build_state_msg(le->link, &xmitq);
1902  		tipc_node_read_unlock(n);
1903  	}
1904  
1905  	if (!skb_queue_empty(&xmitq))
1906  		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1907  
1908  	if (!skb_queue_empty(&be->inputq1))
1909  		tipc_node_mcast_rcv(n);
1910  
1911  	/* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
1912  	if (!skb_queue_empty(&n->bc_entry.namedq))
1913  		tipc_named_rcv(net, &n->bc_entry.namedq,
1914  			       &n->bc_entry.named_rcv_nxt,
1915  			       &n->bc_entry.named_open);
1916  
1917  	/* If reassembly or retransmission failure => reset all links to peer */
1918  	if (rc & TIPC_LINK_DOWN_EVT)
1919  		tipc_node_reset_links(n);
1920  
1921  	tipc_node_put(n);
1922  }
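
/* Note: broadcast reception above deliberately keeps locking light; the
 * node read lock is only taken to build a STATE message on the unicast
 * link, while multicast delivery (inputq1/arrvq/inputq2) and name-table
 * updates (namedq) are driven off their own queues afterwards.
 */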
1923  
1924  /**
1925   * tipc_node_check_state - check and if necessary update node state
1926   * @n: target tipc_node
1927   * @skb: TIPC packet
1928   * @bearer_id: identity of bearer delivering the packet
1929   * @xmitq: queue for messages to be xmited on
1930   * Return: true if state and msg are ok, otherwise false
1931   */
1932  static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1933  				  int bearer_id, struct sk_buff_head *xmitq)
1934  {
1935  	struct tipc_msg *hdr = buf_msg(skb);
1936  	int usr = msg_user(hdr);
1937  	int mtyp = msg_type(hdr);
1938  	u16 oseqno = msg_seqno(hdr);
1939  	u16 exp_pkts = msg_msgcnt(hdr);
1940  	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
1941  	int state = n->state;
1942  	struct tipc_link *l, *tnl, *pl = NULL;
1943  	struct tipc_media_addr *maddr;
1944  	int pb_id;
1945  
1946  	if (trace_tipc_node_check_state_enabled()) {
1947  		trace_tipc_skb_dump(skb, false, "skb for node state check");
1948  		trace_tipc_node_check_state(n, true, " ");
1949  	}
1950  	l = n->links[bearer_id].link;
1951  	if (!l)
1952  		return false;
1953  	rcv_nxt = tipc_link_rcv_nxt(l);
1954  
1956  	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
1957  		return true;
1958  
1959  	/* Find parallel link, if any */
1960  	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
1961  		if ((pb_id != bearer_id) && n->links[pb_id].link) {
1962  			pl = n->links[pb_id].link;
1963  			break;
1964  		}
1965  	}
1966  
1967  	if (!tipc_link_validate_msg(l, hdr)) {
1968  		trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
1969  		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
1970  		return false;
1971  	}
1972  
1973  	/* Check and update node accessibility if applicable */
1974  	if (state == SELF_UP_PEER_COMING) {
1975  		if (!tipc_link_is_up(l))
1976  			return true;
1977  		if (!msg_peer_link_is_up(hdr))
1978  			return true;
1979  		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
1980  	}
1981  
1982  	if (state == SELF_DOWN_PEER_LEAVING) {
1983  		if (msg_peer_node_is_up(hdr))
1984  			return false;
1985  		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
1986  		return true;
1987  	}
1988  
1989  	if (state == SELF_LEAVING_PEER_DOWN)
1990  		return false;
1991  
1992  	/* Ignore duplicate packets */
1993  	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
1994  		return true;
1995  
1996  	/* Initiate or update failover mode if applicable */
1997  	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
1998  		syncpt = oseqno + exp_pkts - 1;
1999  		if (pl && !tipc_link_is_reset(pl)) {
2000  			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
2001  			trace_tipc_node_link_down(n, true,
2002  						  "node link down <- failover!");
2003  			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
2004  							tipc_link_inputq(l));
2005  		}
2006  
2007  		/* If the parallel link was already down, and this happened
2008  		 * before the tunnel link came up, node failover was never
2009  		 * started. Ensure that a FAILOVER_MSG is sent to get the peer
2010  		 * out of NODE_FAILINGOVER state; this node must also accept
2011  		 * TUNNEL_MSGs from the peer.
2012  		 */
2013  		if (n->state != NODE_FAILINGOVER)
2014  			tipc_node_link_failover(n, pl, l, xmitq);
2015  
2016  		/* If pkts arrive out of order, use lowest calculated syncpt */
2017  		if (less(syncpt, n->sync_point))
2018  			n->sync_point = syncpt;
2019  	}
2020  
2021  	/* Open parallel link when tunnel link reaches synch point */
2022  	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
2023  		if (!more(rcv_nxt, n->sync_point))
2024  			return true;
2025  		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
2026  		if (pl)
2027  			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
2028  		return true;
2029  	}
2030  
2031  	/* No syncing needed if only one link */
2032  	if (!pl || !tipc_link_is_up(pl))
2033  		return true;
2034  
2035  	/* Initiate synch mode if applicable */
2036  	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
2037  		if (n->capabilities & TIPC_TUNNEL_ENHANCED)
2038  			syncpt = msg_syncpt(hdr);
2039  		else
2040  			syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
2041  		if (!tipc_link_is_up(l))
2042  			__tipc_node_link_up(n, bearer_id, xmitq);
2043  		if (n->state == SELF_UP_PEER_UP) {
2044  			n->sync_point = syncpt;
2045  			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
2046  			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
2047  		}
2048  	}
2049  
2050  	/* Open tunnel link when parallel link reaches synch point */
2051  	if (n->state == NODE_SYNCHING) {
2052  		if (tipc_link_is_synching(l)) {
2053  			tnl = l;
2054  		} else {
2055  			tnl = pl;
2056  			pl = l;
2057  		}
2058  		inputq_len = skb_queue_len(tipc_link_inputq(pl));
2059  		dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
2060  		if (more(dlv_nxt, n->sync_point)) {
2061  			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
2062  			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
2063  			return true;
2064  		}
2065  		if (l == pl)
2066  			return true;
2067  		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
2068  			return true;
2069  		if (usr == LINK_PROTOCOL)
2070  			return true;
2071  		return false;
2072  	}
2073  	return true;
2074  }
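
/* Summary of the tunnel handling above:
 * - FAILOVER_MSG: take the parallel link down if needed, make sure the
 *   node enters NODE_FAILINGOVER, and track syncpt = oseqno + msgcnt - 1;
 *   failover ends once rcv_nxt on the tunnel link moves past sync_point.
 * - SYNCH_MSG (oseqno == 1): record the synch point (msg_syncpt() on
 *   TIPC_TUNNEL_ENHANCED peers, otherwise derived from the inner header)
 *   and enter NODE_SYNCHING; synch ends once the parallel link has
 *   delivered packets past sync_point.
 */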
2075  
2076  /**
2077   * tipc_rcv - process TIPC packets/messages arriving from off-node
2078   * @net: the applicable net namespace
2079   * @skb: TIPC packet
2080   * @b: pointer to the bearer the message arrived on
2081   *
2082   * Invoked with no locks held. Bearer pointer must point to a valid bearer
2083   * structure (i.e. cannot be NULL), but bearer can be inactive.
2084   */
2085  void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
2086  {
2087  	struct sk_buff_head xmitq;
2088  	struct tipc_link_entry *le;
2089  	struct tipc_msg *hdr;
2090  	struct tipc_node *n;
2091  	int bearer_id = b->identity;
2092  	u32 self = tipc_own_addr(net);
2093  	int usr, rc = 0;
2094  	u16 bc_ack;
2095  #ifdef CONFIG_TIPC_CRYPTO
2096  	struct tipc_ehdr *ehdr;
2097  
2098  	/* Check if message must be decrypted first */
2099  	if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
2100  		goto rcv;
2101  
2102  	ehdr = (struct tipc_ehdr *)skb->data;
2103  	if (likely(ehdr->user != LINK_CONFIG)) {
2104  		n = tipc_node_find(net, ntohl(ehdr->addr));
2105  		if (unlikely(!n))
2106  			goto discard;
2107  	} else {
2108  		n = tipc_node_find_by_id(net, ehdr->id);
2109  	}
2110  	tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
2111  	if (!skb)
2112  		return;
2113  
2114  rcv:
2115  #endif
2116  	/* Ensure message is well-formed before touching the header */
2117  	if (unlikely(!tipc_msg_validate(&skb)))
2118  		goto discard;
2119  	__skb_queue_head_init(&xmitq);
2120  	hdr = buf_msg(skb);
2121  	usr = msg_user(hdr);
2122  	bc_ack = msg_bcast_ack(hdr);
2123  
2124  	/* Handle arrival of discovery or broadcast packet */
2125  	if (unlikely(msg_non_seq(hdr))) {
2126  		if (unlikely(usr == LINK_CONFIG))
2127  			return tipc_disc_rcv(net, skb, b);
2128  		else
2129  			return tipc_node_bc_rcv(net, skb, bearer_id);
2130  	}
2131  
2132  	/* Discard unicast link messages destined for another node */
2133  	if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
2134  		goto discard;
2135  
2136  	/* Locate neighboring node that sent packet */
2137  	n = tipc_node_find(net, msg_prevnode(hdr));
2138  	if (unlikely(!n))
2139  		goto discard;
2140  	le = &n->links[bearer_id];
2141  
2142  	/* Ensure broadcast reception is in synch with peer's send state */
2143  	if (unlikely(usr == LINK_PROTOCOL)) {
2144  		if (unlikely(skb_linearize(skb))) {
2145  			tipc_node_put(n);
2146  			goto discard;
2147  		}
2148  		hdr = buf_msg(skb);
2149  		tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
2150  	} else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) {
2151  		tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
2152  	}
2153  
2154  	/* Receive packet directly if conditions permit */
2155  	tipc_node_read_lock(n);
2156  	if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
2157  		spin_lock_bh(&le->lock);
2158  		if (le->link) {
2159  			rc = tipc_link_rcv(le->link, skb, &xmitq);
2160  			skb = NULL;
2161  		}
2162  		spin_unlock_bh(&le->lock);
2163  	}
2164  	tipc_node_read_unlock(n);
2165  
2166  	/* Check/update node state before receiving */
2167  	if (unlikely(skb)) {
2168  		if (unlikely(skb_linearize(skb)))
2169  			goto out_node_put;
2170  		tipc_node_write_lock(n);
2171  		if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
2172  			if (le->link) {
2173  				rc = tipc_link_rcv(le->link, skb, &xmitq);
2174  				skb = NULL;
2175  			}
2176  		}
2177  		tipc_node_write_unlock(n);
2178  	}
2179  
2180  	if (unlikely(rc & TIPC_LINK_UP_EVT))
2181  		tipc_node_link_up(n, bearer_id, &xmitq);
2182  
2183  	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
2184  		tipc_node_link_down(n, bearer_id, false);
2185  
2186  	if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
2187  		tipc_named_rcv(net, &n->bc_entry.namedq,
2188  			       &n->bc_entry.named_rcv_nxt,
2189  			       &n->bc_entry.named_open);
2190  
2191  	if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
2192  		tipc_node_mcast_rcv(n);
2193  
2194  	if (!skb_queue_empty(&le->inputq))
2195  		tipc_sk_rcv(net, &le->inputq);
2196  
2197  	if (!skb_queue_empty(&xmitq))
2198  		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
2199  
2200  out_node_put:
2201  	tipc_node_put(n);
2202  discard:
2203  	kfree_skb(skb);
2204  }
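
/* Receive path in brief: decrypt if necessary, validate the header, hand
 * non-sequenced traffic to discovery or broadcast reception, synchronize
 * broadcast state on LINK_PROTOCOL, then receive on the link, under the
 * per-link spinlock in the common SELF_UP_PEER_UP case or under the node
 * write lock after a full state check, and finally drain the namedq,
 * multicast and socket input queues before sending queued responses.
 */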
2205  
2206  void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
2207  			      int prop)
2208  {
2209  	struct tipc_net *tn = tipc_net(net);
2210  	int bearer_id = b->identity;
2211  	struct sk_buff_head xmitq;
2212  	struct tipc_link_entry *e;
2213  	struct tipc_node *n;
2214  
2215  	__skb_queue_head_init(&xmitq);
2216  
2217  	rcu_read_lock();
2218  
2219  	list_for_each_entry_rcu(n, &tn->node_list, list) {
2220  		tipc_node_write_lock(n);
2221  		e = &n->links[bearer_id];
2222  		if (e->link) {
2223  			if (prop == TIPC_NLA_PROP_TOL)
2224  				tipc_link_set_tolerance(e->link, b->tolerance,
2225  							&xmitq);
2226  			else if (prop == TIPC_NLA_PROP_MTU)
2227  				tipc_link_set_mtu(e->link, b->mtu);
2228  
2229  			/* Update MTU for node link entry */
2230  			e->mtu = tipc_link_mss(e->link);
2231  		}
2232  
2233  		tipc_node_write_unlock(n);
2234  		tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
2235  	}
2236  
2237  	rcu_read_unlock();
2238  }
2239  
2240  int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
2241  {
2242  	struct net *net = sock_net(skb->sk);
2243  	struct tipc_net *tn = net_generic(net, tipc_net_id);
2244  	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
2245  	struct tipc_node *peer, *temp_node;
2246  	u8 node_id[NODE_ID_LEN];
2247  	u64 *w0 = (u64 *)&node_id[0];
2248  	u64 *w1 = (u64 *)&node_id[8];
2249  	u32 addr;
2250  	int err;
2251  
2252  	/* We identify the peer by its net */
2253  	if (!info->attrs[TIPC_NLA_NET])
2254  		return -EINVAL;
2255  
2256  	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
2257  					  info->attrs[TIPC_NLA_NET],
2258  					  tipc_nl_net_policy, info->extack);
2259  	if (err)
2260  		return err;
2261  
2262  	/* attrs[TIPC_NLA_NET_NODEID] and attrs[TIPC_NLA_NET_ADDR] are
2263  	 * mutually exclusive; exactly one of them must be present
2264  	 */
2265  	if (attrs[TIPC_NLA_NET_ADDR]) {
2266  		addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
2267  		if (!addr)
2268  			return -EINVAL;
2269  	} else if (attrs[TIPC_NLA_NET_NODEID]) {
2270  		if (!attrs[TIPC_NLA_NET_NODEID_W1])
2271  			return -EINVAL;
2272  		*w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
2273  		*w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
2274  		addr = hash128to32(node_id);
2275  	} else {
2276  		return -EINVAL;
2277  	}
2278  
2279  	if (in_own_node(net, addr))
2280  		return -ENOTSUPP;
2281  
2282  	spin_lock_bh(&tn->node_list_lock);
2283  	peer = tipc_node_find(net, addr);
2284  	if (!peer) {
2285  		spin_unlock_bh(&tn->node_list_lock);
2286  		return -ENXIO;
2287  	}
2288  
2289  	tipc_node_write_lock(peer);
2290  	if (peer->state != SELF_DOWN_PEER_DOWN &&
2291  	    peer->state != SELF_DOWN_PEER_LEAVING) {
2292  		tipc_node_write_unlock(peer);
2293  		err = -EBUSY;
2294  		goto err_out;
2295  	}
2296  
2297  	tipc_node_clear_links(peer);
2298  	tipc_node_write_unlock(peer);
2299  	tipc_node_delete(peer);
2300  
2301  	/* Calculate cluster capabilities */
2302  	tn->capabilities = TIPC_NODE_CAPABILITIES;
2303  	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
2304  		tn->capabilities &= temp_node->capabilities;
2305  	}
2306  	tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
2307  	err = 0;
2308  err_out:
2309  	tipc_node_put(peer);
2310  	spin_unlock_bh(&tn->node_list_lock);
2311  
2312  	return err;
2313  }
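
/* Usage note: this op backs peer removal from userspace and only
 * succeeds while the peer is down (SELF_DOWN_PEER_DOWN or
 * SELF_DOWN_PEER_LEAVING), returning -EBUSY otherwise. With the
 * iproute2 tipc tool the corresponding command is believed to be
 * (verify against tipc-peer(8)):
 *
 *	tipc peer remove address 1.1.2
 */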
2314  
2315  int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
2316  {
2317  	int err;
2318  	struct net *net = sock_net(skb->sk);
2319  	struct tipc_net *tn = net_generic(net, tipc_net_id);
2320  	int done = cb->args[0];
2321  	int last_addr = cb->args[1];
2322  	struct tipc_node *node;
2323  	struct tipc_nl_msg msg;
2324  
2325  	if (done)
2326  		return 0;
2327  
2328  	msg.skb = skb;
2329  	msg.portid = NETLINK_CB(cb->skb).portid;
2330  	msg.seq = cb->nlh->nlmsg_seq;
2331  
2332  	rcu_read_lock();
2333  	if (last_addr) {
2334  		node = tipc_node_find(net, last_addr);
2335  		if (!node) {
2336  			rcu_read_unlock();
2337  			/* We never set seq or call nl_dump_check_consistent(),
2338  			 * which means that setting prev_seq here will cause the
2339  			 * consistency check to fail in the netlink callback
2340  			 * handler, resulting in the NLMSG_DONE message having
2341  			 * the NLM_F_DUMP_INTR flag set if the node state
2342  			 * changed while we released the lock.
2343  			 */
2344  			cb->prev_seq = 1;
2345  			return -EPIPE;
2346  		}
2347  		tipc_node_put(node);
2348  	}
2349  
2350  	list_for_each_entry_rcu(node, &tn->node_list, list) {
2351  		if (node->preliminary)
2352  			continue;
2353  		if (last_addr) {
2354  			if (node->addr == last_addr)
2355  				last_addr = 0;
2356  			else
2357  				continue;
2358  		}
2359  
2360  		tipc_node_read_lock(node);
2361  		err = __tipc_nl_add_node(&msg, node);
2362  		if (err) {
2363  			last_addr = node->addr;
2364  			tipc_node_read_unlock(node);
2365  			goto out;
2366  		}
2367  
2368  		tipc_node_read_unlock(node);
2369  	}
2370  	done = 1;
2371  out:
2372  	cb->args[0] = done;
2373  	cb->args[1] = last_addr;
2374  	rcu_read_unlock();
2375  
2376  	return skb->len;
2377  }
2378  
2379  /* tipc_node_find_by_name - locate owner node of link by link's name
2380   * @net: the applicable net namespace
2381   * @link_name: pointer to link name string
2382   * @bearer_id: pointer to index in 'node->links' array where the link was found.
2383   *
2384   * Returns pointer to the node owning the link, or NULL if no matching link is found.
2385   */
2386  static struct tipc_node *tipc_node_find_by_name(struct net *net,
2387  						const char *link_name,
2388  						unsigned int *bearer_id)
2389  {
2390  	struct tipc_net *tn = net_generic(net, tipc_net_id);
2391  	struct tipc_link *l;
2392  	struct tipc_node *n;
2393  	struct tipc_node *found_node = NULL;
2394  	int i;
2395  
2396  	*bearer_id = 0;
2397  	rcu_read_lock();
2398  	list_for_each_entry_rcu(n, &tn->node_list, list) {
2399  		tipc_node_read_lock(n);
2400  		for (i = 0; i < MAX_BEARERS; i++) {
2401  			l = n->links[i].link;
2402  			if (l && !strcmp(tipc_link_name(l), link_name)) {
2403  				*bearer_id = i;
2404  				found_node = n;
2405  				break;
2406  			}
2407  		}
2408  		tipc_node_read_unlock(n);
2409  		if (found_node)
2410  			break;
2411  	}
2412  	rcu_read_unlock();
2413  
2414  	return found_node;
2415  }
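
/* Usage sketch (the link name below is hypothetical; TIPC link names
 * encode both endpoints):
 *
 *	unsigned int bearer_id;
 *	struct tipc_node *n;
 *
 *	n = tipc_node_find_by_name(net, "1.1.1:eth0-1.1.2:eth0", &bearer_id);
 *	if (n)
 *		pr_debug("link on node %x, bearer %u\n", n->addr, bearer_id);
 */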
2416  
2417  int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
2418  {
2419  	int err;
2420  	int res = 0;
2421  	int bearer_id;
2422  	char *name;
2423  	struct tipc_link *link;
2424  	struct tipc_node *node;
2425  	struct sk_buff_head xmitq;
2426  	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2427  	struct net *net = sock_net(skb->sk);
2428  
2429  	__skb_queue_head_init(&xmitq);
2430  
2431  	if (!info->attrs[TIPC_NLA_LINK])
2432  		return -EINVAL;
2433  
2434  	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2435  					  info->attrs[TIPC_NLA_LINK],
2436  					  tipc_nl_link_policy, info->extack);
2437  	if (err)
2438  		return err;
2439  
2440  	if (!attrs[TIPC_NLA_LINK_NAME])
2441  		return -EINVAL;
2442  
2443  	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2444  
2445  	if (strcmp(name, tipc_bclink_name) == 0)
2446  		return tipc_nl_bc_link_set(net, attrs);
2447  
2448  	node = tipc_node_find_by_name(net, name, &bearer_id);
2449  	if (!node)
2450  		return -EINVAL;
2451  
2452  	tipc_node_read_lock(node);
2453  
2454  	link = node->links[bearer_id].link;
2455  	if (!link) {
2456  		res = -EINVAL;
2457  		goto out;
2458  	}
2459  
2460  	if (attrs[TIPC_NLA_LINK_PROP]) {
2461  		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
2462  
2463  		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
2464  		if (err) {
2465  			res = err;
2466  			goto out;
2467  		}
2468  
2469  		if (props[TIPC_NLA_PROP_TOL]) {
2470  			u32 tol;
2471  
2472  			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2473  			tipc_link_set_tolerance(link, tol, &xmitq);
2474  		}
2475  		if (props[TIPC_NLA_PROP_PRIO]) {
2476  			u32 prio;
2477  
2478  			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2479  			tipc_link_set_prio(link, prio, &xmitq);
2480  		}
2481  		if (props[TIPC_NLA_PROP_WIN]) {
2482  			u32 max_win;
2483  
2484  			max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2485  			tipc_link_set_queue_limits(link,
2486  						   tipc_link_min_win(link),
2487  						   max_win);
2488  		}
2489  	}
2490  
2491  out:
2492  	tipc_node_read_unlock(node);
2493  	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
2494  			 NULL);
2495  	return res;
2496  }
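
/* Usage note: tolerance, priority and window are the per-link properties
 * settable here. With the iproute2 tipc tool the syntax is believed to
 * be (verify against tipc-link(8)):
 *
 *	tipc link set tolerance 1500 link <linkname>
 *	tipc link set priority 20 link <linkname>
 *	tipc link set window 100 link <linkname>
 */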
2497  
2498  int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
2499  {
2500  	struct net *net = genl_info_net(info);
2501  	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2502  	struct tipc_nl_msg msg;
2503  	char *name;
2504  	int err;
2505  
2506  	msg.portid = info->snd_portid;
2507  	msg.seq = info->snd_seq;
2508  
2509  	if (!info->attrs[TIPC_NLA_LINK])
2510  		return -EINVAL;
2511  
2512  	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2513  					  info->attrs[TIPC_NLA_LINK],
2514  					  tipc_nl_link_policy, info->extack);
2515  	if (err)
2516  		return err;
2517  
2518  	if (!attrs[TIPC_NLA_LINK_NAME])
2519  		return -EINVAL;
2520  
2521  	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2522  
2523  	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2524  	if (!msg.skb)
2525  		return -ENOMEM;
2526  
2527  	if (strcmp(name, tipc_bclink_name) == 0) {
2528  		err = tipc_nl_add_bc_link(net, &msg, tipc_net(net)->bcl);
2529  		if (err)
2530  			goto err_free;
2531  	} else {
2532  		int bearer_id;
2533  		struct tipc_node *node;
2534  		struct tipc_link *link;
2535  
2536  		node = tipc_node_find_by_name(net, name, &bearer_id);
2537  		if (!node) {
2538  			err = -EINVAL;
2539  			goto err_free;
2540  		}
2541  
2542  		tipc_node_read_lock(node);
2543  		link = node->links[bearer_id].link;
2544  		if (!link) {
2545  			tipc_node_read_unlock(node);
2546  			err = -EINVAL;
2547  			goto err_free;
2548  		}
2549  
2550  		err = __tipc_nl_add_link(net, &msg, link, 0);
2551  		tipc_node_read_unlock(node);
2552  		if (err)
2553  			goto err_free;
2554  	}
2555  
2556  	return genlmsg_reply(msg.skb, info);
2557  
2558  err_free:
2559  	nlmsg_free(msg.skb);
2560  	return err;
2561  }
2562  
2563  int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
2564  {
2565  	int err;
2566  	char *link_name;
2567  	unsigned int bearer_id;
2568  	struct tipc_link *link;
2569  	struct tipc_node *node;
2570  	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2571  	struct net *net = sock_net(skb->sk);
2572  	struct tipc_net *tn = tipc_net(net);
2573  	struct tipc_link_entry *le;
2574  
2575  	if (!info->attrs[TIPC_NLA_LINK])
2576  		return -EINVAL;
2577  
2578  	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2579  					  info->attrs[TIPC_NLA_LINK],
2580  					  tipc_nl_link_policy, info->extack);
2581  	if (err)
2582  		return err;
2583  
2584  	if (!attrs[TIPC_NLA_LINK_NAME])
2585  		return -EINVAL;
2586  
2587  	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2588  
2589  	err = -EINVAL;
2590  	if (!strcmp(link_name, tipc_bclink_name)) {
2591  		err = tipc_bclink_reset_stats(net, tipc_bc_sndlink(net));
2592  		if (err)
2593  			return err;
2594  		return 0;
2595  	} else if (strstr(link_name, tipc_bclink_name)) {
2596  		rcu_read_lock();
2597  		list_for_each_entry_rcu(node, &tn->node_list, list) {
2598  			tipc_node_read_lock(node);
2599  			link = node->bc_entry.link;
2600  			if (link && !strcmp(link_name, tipc_link_name(link))) {
2601  				err = tipc_bclink_reset_stats(net, link);
2602  				tipc_node_read_unlock(node);
2603  				break;
2604  			}
2605  			tipc_node_read_unlock(node);
2606  		}
2607  		rcu_read_unlock();
2608  		return err;
2609  	}
2610  
2611  	node = tipc_node_find_by_name(net, link_name, &bearer_id);
2612  	if (!node)
2613  		return -EINVAL;
2614  
2615  	le = &node->links[bearer_id];
2616  	tipc_node_read_lock(node);
2617  	spin_lock_bh(&le->lock);
2618  	link = node->links[bearer_id].link;
2619  	if (!link) {
2620  		spin_unlock_bh(&le->lock);
2621  		tipc_node_read_unlock(node);
2622  		return -EINVAL;
2623  	}
2624  	tipc_link_reset_stats(link);
2625  	spin_unlock_bh(&le->lock);
2626  	tipc_node_read_unlock(node);
2627  	return 0;
2628  }
2629  
2630  /* Caller should hold the node read lock */
2631  static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
2632  				    struct tipc_node *node, u32 *prev_link,
2633  				    bool bc_link)
2634  {
2635  	u32 i;
2636  	int err;
2637  
2638  	for (i = *prev_link; i < MAX_BEARERS; i++) {
2639  		*prev_link = i;
2640  
2641  		if (!node->links[i].link)
2642  			continue;
2643  
2644  		err = __tipc_nl_add_link(net, msg,
2645  					 node->links[i].link, NLM_F_MULTI);
2646  		if (err)
2647  			return err;
2648  	}
2649  
2650  	if (bc_link) {
2651  		*prev_link = i;
2652  		err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link);
2653  		if (err)
2654  			return err;
2655  	}
2656  
2657  	*prev_link = 0;
2658  
2659  	return 0;
2660  }
2661  
2662  int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
2663  {
2664  	struct net *net = sock_net(skb->sk);
2665  	struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
2666  	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
2667  	struct tipc_net *tn = net_generic(net, tipc_net_id);
2668  	struct tipc_node *node;
2669  	struct tipc_nl_msg msg;
2670  	u32 prev_node = cb->args[0];
2671  	u32 prev_link = cb->args[1];
2672  	int done = cb->args[2];
2673  	bool bc_link = cb->args[3];
2674  	int err;
2675  
2676  	if (done)
2677  		return 0;
2678  
2679  	if (!prev_node) {
2680  		/* Check if dumping of broadcast-receiver links is needed */
2681  		if (attrs && attrs[TIPC_NLA_LINK]) {
2682  			err = nla_parse_nested_deprecated(link,
2683  							  TIPC_NLA_LINK_MAX,
2684  							  attrs[TIPC_NLA_LINK],
2685  							  tipc_nl_link_policy,
2686  							  NULL);
2687  			if (unlikely(err))
2688  				return err;
2689  			if (unlikely(!link[TIPC_NLA_LINK_BROADCAST]))
2690  				return -EINVAL;
2691  			bc_link = true;
2692  		}
2693  	}
2694  
2695  	msg.skb = skb;
2696  	msg.portid = NETLINK_CB(cb->skb).portid;
2697  	msg.seq = cb->nlh->nlmsg_seq;
2698  
2699  	rcu_read_lock();
2700  	if (prev_node) {
2701  		node = tipc_node_find(net, prev_node);
2702  		if (!node) {
2703  			/* We never set seq or call nl_dump_check_consistent(),
2704  			 * which means that setting prev_seq here will cause the
2705  			 * consistency check to fail in the netlink callback
2706  			 * handler, resulting in the last NLMSG_DONE message
2707  			 * having the NLM_F_DUMP_INTR flag set.
2708  			 */
2709  			cb->prev_seq = 1;
2710  			goto out;
2711  		}
2712  		tipc_node_put(node);
2713  
2714  		list_for_each_entry_continue_rcu(node, &tn->node_list,
2715  						 list) {
2716  			tipc_node_read_lock(node);
2717  			err = __tipc_nl_add_node_links(net, &msg, node,
2718  						       &prev_link, bc_link);
2719  			tipc_node_read_unlock(node);
2720  			if (err)
2721  				goto out;
2722  
2723  			prev_node = node->addr;
2724  		}
2725  	} else {
2726  		err = tipc_nl_add_bc_link(net, &msg, tn->bcl);
2727  		if (err)
2728  			goto out;
2729  
2730  		list_for_each_entry_rcu(node, &tn->node_list, list) {
2731  			tipc_node_read_lock(node);
2732  			err = __tipc_nl_add_node_links(net, &msg, node,
2733  						       &prev_link, bc_link);
2734  			tipc_node_read_unlock(node);
2735  			if (err)
2736  				goto out;
2737  
2738  			prev_node = node->addr;
2739  		}
2740  	}
2741  	done = 1;
2742  out:
2743  	rcu_read_unlock();
2744  
2745  	cb->args[0] = prev_node;
2746  	cb->args[1] = prev_link;
2747  	cb->args[2] = done;
2748  	cb->args[3] = bc_link;
2749  
2750  	return skb->len;
2751  }
2752  
2753  int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
2754  {
2755  	struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
2756  	struct net *net = sock_net(skb->sk);
2757  	int err;
2758  
2759  	if (!info->attrs[TIPC_NLA_MON])
2760  		return -EINVAL;
2761  
2762  	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
2763  					  info->attrs[TIPC_NLA_MON],
2764  					  tipc_nl_monitor_policy,
2765  					  info->extack);
2766  	if (err)
2767  		return err;
2768  
2769  	if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
2770  		u32 val;
2771  
2772  		val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
2773  		err = tipc_nl_monitor_set_threshold(net, val);
2774  		if (err)
2775  			return err;
2776  	}
2777  
2778  	return 0;
2779  }
2780  
2781  static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
2782  {
2783  	struct nlattr *attrs;
2784  	void *hdr;
2785  	u32 val;
2786  
2787  	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2788  			  0, TIPC_NL_MON_GET);
2789  	if (!hdr)
2790  		return -EMSGSIZE;
2791  
2792  	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
2793  	if (!attrs)
2794  		goto msg_full;
2795  
2796  	val = tipc_nl_monitor_get_threshold(net);
2797  
2798  	if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
2799  		goto attr_msg_full;
2800  
2801  	nla_nest_end(msg->skb, attrs);
2802  	genlmsg_end(msg->skb, hdr);
2803  
2804  	return 0;
2805  
2806  attr_msg_full:
2807  	nla_nest_cancel(msg->skb, attrs);
2808  msg_full:
2809  	genlmsg_cancel(msg->skb, hdr);
2810  
2811  	return -EMSGSIZE;
2812  }
2813  
2814  int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
2815  {
2816  	struct net *net = sock_net(skb->sk);
2817  	struct tipc_nl_msg msg;
2818  	int err;
2819  
2820  	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2821  	if (!msg.skb)
2822  		return -ENOMEM;
2823  	msg.portid = info->snd_portid;
2824  	msg.seq = info->snd_seq;
2825  
2826  	err = __tipc_nl_add_monitor_prop(net, &msg);
2827  	if (err) {
2828  		nlmsg_free(msg.skb);
2829  		return err;
2830  	}
2831  
2832  	return genlmsg_reply(msg.skb, info);
2833  }
2834  
2835  int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
2836  {
2837  	struct net *net = sock_net(skb->sk);
2838  	u32 prev_bearer = cb->args[0];
2839  	struct tipc_nl_msg msg;
2840  	int bearer_id;
2841  	int err;
2842  
2843  	if (prev_bearer == MAX_BEARERS)
2844  		return 0;
2845  
2846  	msg.skb = skb;
2847  	msg.portid = NETLINK_CB(cb->skb).portid;
2848  	msg.seq = cb->nlh->nlmsg_seq;
2849  
2850  	rtnl_lock();
2851  	for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
2852  		err = __tipc_nl_add_monitor(net, &msg, bearer_id);
2853  		if (err)
2854  			break;
2855  	}
2856  	rtnl_unlock();
2857  	cb->args[0] = bearer_id;
2858  
2859  	return skb->len;
2860  }
2861  
2862  int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
2863  				   struct netlink_callback *cb)
2864  {
2865  	struct net *net = sock_net(skb->sk);
2866  	u32 prev_node = cb->args[1];
2867  	u32 bearer_id = cb->args[2];
2868  	int done = cb->args[0];
2869  	struct tipc_nl_msg msg;
2870  	int err;
2871  
2872  	if (!prev_node) {
2873  		struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
2874  		struct nlattr *mon[TIPC_NLA_MON_MAX + 1];
2875  
2876  		if (!attrs[TIPC_NLA_MON])
2877  			return -EINVAL;
2878  
2879  		err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
2880  						  attrs[TIPC_NLA_MON],
2881  						  tipc_nl_monitor_policy,
2882  						  NULL);
2883  		if (err)
2884  			return err;
2885  
2886  		if (!mon[TIPC_NLA_MON_REF])
2887  			return -EINVAL;
2888  
2889  		bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);
2890  
2891  		if (bearer_id >= MAX_BEARERS)
2892  			return -EINVAL;
2893  	}
2894  
2895  	if (done)
2896  		return 0;
2897  
2898  	msg.skb = skb;
2899  	msg.portid = NETLINK_CB(cb->skb).portid;
2900  	msg.seq = cb->nlh->nlmsg_seq;
2901  
2902  	rtnl_lock();
2903  	err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
2904  	if (!err)
2905  		done = 1;
2906  
2907  	rtnl_unlock();
2908  	cb->args[0] = done;
2909  	cb->args[1] = prev_node;
2910  	cb->args[2] = bearer_id;
2911  
2912  	return skb->len;
2913  }
2914  
2915  #ifdef CONFIG_TIPC_CRYPTO
2916  static int tipc_nl_retrieve_key(struct nlattr **attrs,
2917  				struct tipc_aead_key **pkey)
2918  {
2919  	struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
2920  	struct tipc_aead_key *key;
2921  
2922  	if (!attr)
2923  		return -ENODATA;
2924  
2925  	if (nla_len(attr) < sizeof(*key))
2926  		return -EINVAL;
2927  	key = (struct tipc_aead_key *)nla_data(attr);
2928  	if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
2929  	    nla_len(attr) < tipc_aead_key_size(key))
2930  		return -EINVAL;
2931  
2932  	*pkey = key;
2933  	return 0;
2934  }
2935  
2936  static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
2937  {
2938  	struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];
2939  
2940  	if (!attr)
2941  		return -ENODATA;
2942  
2943  	if (nla_len(attr) < TIPC_NODEID_LEN)
2944  		return -EINVAL;
2945  
2946  	*node_id = (u8 *)nla_data(attr);
2947  	return 0;
2948  }
2949  
2950  static int tipc_nl_retrieve_rekeying(struct nlattr **attrs, u32 *intv)
2951  {
2952  	struct nlattr *attr = attrs[TIPC_NLA_NODE_REKEYING];
2953  
2954  	if (!attr)
2955  		return -ENODATA;
2956  
2957  	*intv = nla_get_u32(attr);
2958  	return 0;
2959  }
2960  
2961  static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
2962  {
2963  	struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
2964  	struct net *net = sock_net(skb->sk);
2965  	struct tipc_crypto *tx = tipc_net(net)->crypto_tx, *c = tx;
2966  	struct tipc_node *n = NULL;
2967  	struct tipc_aead_key *ukey;
2968  	bool rekeying = true, master_key = false;
2969  	u8 *id, *own_id, mode;
2970  	u32 intv = 0;
2971  	int rc = 0;
2972  
2973  	if (!info->attrs[TIPC_NLA_NODE])
2974  		return -EINVAL;
2975  
2976  	rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
2977  			      info->attrs[TIPC_NLA_NODE],
2978  			      tipc_nl_node_policy, info->extack);
2979  	if (rc)
2980  		return rc;
2981  
2982  	own_id = tipc_own_id(net);
2983  	if (!own_id) {
2984  		GENL_SET_ERR_MSG(info, "own node identity not found (set id?)");
2985  		return -EPERM;
2986  	}
2987  
2988  	rc = tipc_nl_retrieve_rekeying(attrs, &intv);
2989  	if (rc == -ENODATA)
2990  		rekeying = false;
2991  
2992  	rc = tipc_nl_retrieve_key(attrs, &ukey);
2993  	if (rc == -ENODATA && rekeying)
2994  		goto rekeying;
2995  	else if (rc)
2996  		return rc;
2997  
2998  	rc = tipc_aead_key_validate(ukey, info);
2999  	if (rc)
3000  		return rc;
3001  
3002  	rc = tipc_nl_retrieve_nodeid(attrs, &id);
3003  	switch (rc) {
3004  	case -ENODATA:
3005  		mode = CLUSTER_KEY;
3006  		master_key = !!(attrs[TIPC_NLA_NODE_KEY_MASTER]);
3007  		break;
3008  	case 0:
3009  		mode = PER_NODE_KEY;
3010  		if (memcmp(id, own_id, NODE_ID_LEN)) {
3011  			n = tipc_node_find_by_id(net, id) ?:
3012  				tipc_node_create(net, 0, id, 0xffffu, 0, true);
3013  			if (unlikely(!n))
3014  				return -ENOMEM;
3015  			c = n->crypto_rx;
3016  		}
3017  		break;
3018  	default:
3019  		return rc;
3020  	}
3021  
3022  	/* Initiate the TX/RX key */
3023  	rc = tipc_crypto_key_init(c, ukey, mode, master_key);
3024  	if (n)
3025  		tipc_node_put(n);
3026  
3027  	if (unlikely(rc < 0)) {
3028  		GENL_SET_ERR_MSG(info, "unable to initiate or attach new key");
3029  		return rc;
3030  	} else if (c == tx) {
3031  		/* Distribute TX key but not master one */
3032  		if (!master_key && tipc_crypto_key_distr(tx, rc, NULL))
3033  			GENL_SET_ERR_MSG(info, "failed to replicate new key");
3034  rekeying:
3035  		/* Schedule TX rekeying if needed */
3036  		tipc_crypto_rekeying_sched(tx, rekeying, intv);
3037  	}
3038  
3039  	return 0;
3040  }
3041  
3042  int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
3043  {
3044  	int err;
3045  
3046  	rtnl_lock();
3047  	err = __tipc_nl_node_set_key(skb, info);
3048  	rtnl_unlock();
3049  
3050  	return err;
3051  }
3052  
3053  static int __tipc_nl_node_flush_key(struct sk_buff *skb,
3054  				    struct genl_info *info)
3055  {
3056  	struct net *net = sock_net(skb->sk);
3057  	struct tipc_net *tn = tipc_net(net);
3058  	struct tipc_node *n;
3059  
3060  	tipc_crypto_key_flush(tn->crypto_tx);
3061  	rcu_read_lock();
3062  	list_for_each_entry_rcu(n, &tn->node_list, list)
3063  		tipc_crypto_key_flush(n->crypto_rx);
3064  	rcu_read_unlock();
3065  
3066  	return 0;
3067  }
3068  
3069  int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
3070  {
3071  	int err;
3072  
3073  	rtnl_lock();
3074  	err = __tipc_nl_node_flush_key(skb, info);
3075  	rtnl_unlock();
3076  
3077  	return err;
3078  }
3079  #endif
3080  
3081  /**
3082   * tipc_node_dump - dump TIPC node data
3083   * @n: tipc node to be dumped
3084   * @more: dump more?
3085   *        - false: dump only tipc node data
3086   *        - true: dump node link data as well
3087   * @buf: buffer where the formatted dump data is returned
3088   */
3089  int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
3090  {
3091  	int i = 0;
3092  	size_t sz = (more) ? NODE_LMAX : NODE_LMIN;
3093  
3094  	if (!n) {
3095  		i += scnprintf(buf, sz, "node data: (null)\n");
3096  		return i;
3097  	}
3098  
3099  	i += scnprintf(buf, sz, "node data: %x", n->addr);
3100  	i += scnprintf(buf + i, sz - i, " %x", n->state);
3101  	i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
3102  	i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
3103  	i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
3104  	i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
3105  	i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
3106  	i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
3107  	i += scnprintf(buf + i, sz - i, " %u", n->working_links);
3108  	i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
3109  	i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);
3110  
3111  	if (!more)
3112  		return i;
3113  
3114  	i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
3115  	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
3116  	i += scnprintf(buf + i, sz - i, " media: ");
3117  	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
3118  	i += scnprintf(buf + i, sz - i, "\n");
3119  	i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
3120  	i += scnprintf(buf + i, sz - i, " inputq: ");
3121  	i += tipc_list_dump(&n->links[0].inputq, false, buf + i);
3122  
3123  	i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
3124  	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
3125  	i += scnprintf(buf + i, sz - i, " media: ");
3126  	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
3127  	i += scnprintf(buf + i, sz - i, "\n");
3128  	i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
3129  	i += scnprintf(buf + i, sz - i, " inputq: ");
3130  	i += tipc_list_dump(&n->links[1].inputq, false, buf + i);
3131  
3132  	i += scnprintf(buf + i, sz - i, "bclink:\n ");
3133  	i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);
3134  
3135  	return i;
3136  }
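
/* Usage sketch (debug-only; a heap buffer is preferable in real kernel
 * code since NODE_LMAX may be large for the stack):
 *
 *	char buf[NODE_LMAX];
 *
 *	tipc_node_dump(n, true, buf);
 *	pr_info("%s", buf);
 */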
3137  
3138  void tipc_node_pre_cleanup_net(struct net *exit_net)
3139  {
3140  	struct tipc_node *n;
3141  	struct tipc_net *tn;
3142  	struct net *tmp;
3143  
3144  	rcu_read_lock();
3145  	for_each_net_rcu(tmp) {
3146  		if (tmp == exit_net)
3147  			continue;
3148  		tn = tipc_net(tmp);
3149  		if (!tn)
3150  			continue;
3151  		spin_lock_bh(&tn->node_list_lock);
3152  		list_for_each_entry_rcu(n, &tn->node_list, list) {
3153  			if (!n->peer_net)
3154  				continue;
3155  			if (n->peer_net != exit_net)
3156  				continue;
3157  			tipc_node_write_lock(n);
3158  			n->peer_net = NULL;
3159  			n->peer_hash_mix = 0;
3160  			tipc_node_write_unlock_fast(n);
3161  			break;
3162  		}
3163  		spin_unlock_bh(&tn->node_list_lock);
3164  	}
3165  	rcu_read_unlock();
3166  }
3167