/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "monitor.h"
#include "discover.h"
#include "netlink.h"
#include "trace.h"
#include "crypto.h"

#define INVALID_NODE_SIG	0x10000
#define NODE_CLEANUP_AFTER	300000

/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_NOTIFY_LINK_UP: notify link is up
 * TIPC_NOTIFY_LINK_DOWN: notify link is down
 */
enum {
	TIPC_NOTIFY_NODE_DOWN		= (1 << 3),
	TIPC_NOTIFY_NODE_UP		= (1 << 4),
	TIPC_NOTIFY_LINK_UP		= (1 << 6),
	TIPC_NOTIFY_LINK_DOWN		= (1 << 7)
};

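/**
 * struct tipc_link_entry - TIPC per-bearer link entry
 * @link: reference to the link instance, if any
 * @lock: spinlock protecting this entry's link during message transmission
 * @mtu: current maximum packet size for this link
 * @inputq: queue of messages to be delivered upwards from this link
 * @maddr: media address of the peer's endpoint on this bearer
 */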
struct tipc_link_entry {
	struct tipc_link *link;
	spinlock_t lock; /* per link */
	u32 mtu;
	struct sk_buff_head inputq;
	struct tipc_media_addr maddr;
};

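/**
 * struct tipc_bclink_entry - TIPC node's broadcast link entry
 * @link: broadcast receive link to the node
 * @inputq1: arrival queue for received broadcast messages
 * @arrvq: intermediate queue, protected by inputq2's lock
 *         (see tipc_node_mcast_rcv())
 * @inputq2: delivery queue for multicast messages
 * @namedq: queue for name table messages from the node
 * @named_rcv_nxt: next expected sequence number of name table messages
 * @named_open: true while the name table exchange with this node is open
 */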
struct tipc_bclink_entry {
	struct tipc_link *link;
	struct sk_buff_head inputq1;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq2;
	struct sk_buff_head namedq;
	u16 named_rcv_nxt;
	bool named_open;
};

/**
 * struct tipc_node - TIPC node structure
 * @addr: network address of node
 * @kref: reference counter to node object
 * @lock: rwlock governing access to structure
 * @net: the applicable net namespace
 * @hash: links to adjacent nodes in unsorted hash chain
 * @active_links: bearer ids of active links, used as index into links[] array
 * @links: array containing references to all links to node
 * @bc_entry: broadcast link entry
 * @action_flags: bit mask of different types of node actions
 * @state: connectivity state vs peer node
 * @preliminary: node has only been assigned a preliminary address
 * @failover_sent: failover sent or not
 * @sync_point: sequence number where synch/failover is finished
 * @list: links to adjacent nodes in sorted list of cluster's nodes
 * @working_links: number of working links to node (both active and standby)
 * @link_cnt: number of links to node
 * @capabilities: bitmap, indicating peer node's functional capabilities
 * @signature: node instance identifier
 * @link_id: local and remote bearer ids of changing link, if any
 * @peer_id: 128-bit ID of peer
 * @peer_id_string: ID string of peer
 * @publ_list: list of publications
 * @conn_sks: list of connections (FIXME)
 * @timer: node's keepalive timer
 * @keepalive_intv: keepalive interval in milliseconds
 * @rcu: rcu struct for tipc_node
 * @delete_at: indicates the time for deleting a down node
 * @peer_net: peer's net namespace
 * @peer_hash_mix: hash for this peer (FIXME)
 * @crypto_rx: RX crypto handler
 */
struct tipc_node {
	u32 addr;
	struct kref kref;
	rwlock_t lock;
	struct net *net;
	struct hlist_node hash;
	int active_links[2];
	struct tipc_link_entry links[MAX_BEARERS];
	struct tipc_bclink_entry bc_entry;
	int action_flags;
	struct list_head list;
	int state;
	bool preliminary;
	bool failover_sent;
	u16 sync_point;
	int link_cnt;
	u16 working_links;
	u16 capabilities;
	u32 signature;
	u32 link_id;
	u8 peer_id[16];
	char peer_id_string[NODE_ID_STR_LEN];
	struct list_head publ_list;
	struct list_head conn_sks;
	unsigned long keepalive_intv;
	struct timer_list timer;
	struct rcu_head rcu;
	unsigned long delete_at;
	struct net *peer_net;
	u32 peer_hash_mix;
#ifdef CONFIG_TIPC_CRYPTO
	struct tipc_crypto *crypto_rx;
#endif
};

/* Node FSM states and events. Most state values are mnemonic hex codes
 * for the (self, peer) endpoint status: 'd' = down, 'a' = up,
 * 'c' = coming, '1' = leaving.
 */
enum {
	SELF_DOWN_PEER_DOWN    = 0xdd,
	SELF_UP_PEER_UP        = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING    = 0xac,
	SELF_COMING_PEER_UP    = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER       = 0xf0,
	NODE_SYNCHING          = 0xcc
};

enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT   = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT   = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT   = 0xfee,
	NODE_SYNCH_BEGIN_EVT    = 0xcbe,
	NODE_SYNCH_END_EVT      = 0xcee
};

static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr);
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
				bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
static bool node_is_up(struct tipc_node *n);
static void tipc_node_delete_from_list(struct tipc_node *node);

struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};

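/* node_active_link - return the active link selected by the lowest bit
 * of 'sel', or NULL if the node currently has no active links
 */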
static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
	int bearer_id = n->active_links[sel & 1];

	if (unlikely(bearer_id == INVALID_BEARER_ID))
		return NULL;

	return n->links[bearer_id].link;
}

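/**
 * tipc_node_get_mtu - get the MTU to use towards a given node
 * @net: the applicable net namespace
 * @addr: network address of the node
 * @sel: selector deciding which of the active links to use
 * @connected: true if the MTU is for a connection oriented message
 *
 * Return: the link MTU, or MAX_MSG_SIZE if the node is unknown or is
 * a same-kernel peer for connected traffic
 */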
int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
{
	struct tipc_node *n;
	int bearer_id;
	unsigned int mtu = MAX_MSG_SIZE;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return mtu;

	/* Allow MAX_MSG_SIZE when building connection oriented messages
	 * if the nodes are in the same core network (same kernel)
	 */
	if (n->peer_net && connected) {
		tipc_node_put(n);
		return mtu;
	}

	bearer_id = n->active_links[sel & 1];
	if (likely(bearer_id != INVALID_BEARER_ID))
		mtu = n->links[bearer_id].mtu;
	tipc_node_put(n);
	return mtu;
}

bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
{
	u8 *own_id = tipc_own_id(net);
	struct tipc_node *n;

	if (!own_id)
		return true;

	if (addr == tipc_own_addr(net)) {
		memcpy(id, own_id, TIPC_NODEID_LEN);
		return true;
	}
	n = tipc_node_find(net, addr);
	if (!n)
		return false;

	memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
	tipc_node_put(n);
	return true;
}

u16 tipc_node_get_capabilities(struct net *net, u32 addr)
{
	struct tipc_node *n;
	u16 caps;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return TIPC_NODE_CAPABILITIES;
	caps = n->capabilities;
	tipc_node_put(n);
	return caps;
}

u32 tipc_node_get_addr(struct tipc_node *node)
{
	return (node) ? node->addr : 0;
}

char *tipc_node_get_id_str(struct tipc_node *node)
{
	return node->peer_id_string;
}

#ifdef CONFIG_TIPC_CRYPTO
/**
 * tipc_node_crypto_rx - Retrieve crypto RX handle from node
 * @__n: target tipc_node
 * Note: node ref counter must be held first!
 */
struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
{
	return (__n) ? __n->crypto_rx : NULL;
}

struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
{
	return container_of(pos, struct tipc_node, list)->crypto_rx;
}

struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	n = tipc_node_find(net, addr);
	return (n) ? n->crypto_rx : NULL;
}
#endif

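/* Final destruction of a node, deferred as an RCU callback so that
 * lockless readers (e.g. tipc_node_find()) never see a freed node;
 * crypto RX handling is stopped before the memory is released
 */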
static void tipc_node_free(struct rcu_head *rp)
{
	struct tipc_node *n = container_of(rp, struct tipc_node, rcu);

#ifdef CONFIG_TIPC_CRYPTO
	tipc_crypto_stop(&n->crypto_rx);
#endif
	kfree(n);
}

static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *n = container_of(kref, struct tipc_node, kref);

	kfree(n->bc_entry.link);
	call_rcu(&n->rcu, tipc_node_free);
}

void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node;
	unsigned int thash = tipc_hashfn(addr);

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
		if (node->addr != addr || node->preliminary)
			continue;
		if (!kref_get_unless_zero(&node->kref))
			node = NULL;
		break;
	}
	rcu_read_unlock();
	return node;
}

/* tipc_node_find_by_id - locate specified node object by its 128-bit id
 * Note: this function is called only when a discovery request failed
 * to find the node by its 32-bit id, and is not time critical
 */
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		read_lock_bh(&n->lock);
		if (!memcmp(id, n->peer_id, 16) &&
		    kref_get_unless_zero(&n->kref))
			found = true;
		read_unlock_bh(&n->lock);
		if (found)
			break;
	}
	rcu_read_unlock();
	return found ? n : NULL;
}

static void tipc_node_read_lock(struct tipc_node *n)
	__acquires(n->lock)
{
	read_lock_bh(&n->lock);
}

static void tipc_node_read_unlock(struct tipc_node *n)
	__releases(n->lock)
{
	read_unlock_bh(&n->lock);
}

static void tipc_node_write_lock(struct tipc_node *n)
	__acquires(n->lock)
{
	write_lock_bh(&n->lock);
}

static void tipc_node_write_unlock_fast(struct tipc_node *n)
	__releases(n->lock)
{
	write_unlock_bh(&n->lock);
}

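/* tipc_node_write_unlock - release the write lock and process pending
 * action flags. The flags are snapshotted and cleared under the lock;
 * the resulting notifications are sent only after the lock has been
 * dropped, so other layers can be called back safely.
 */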
static void tipc_node_write_unlock(struct tipc_node *n)
	__releases(n->lock)
{
	struct tipc_socket_addr sk;
	struct net *net = n->net;
	u32 flags = n->action_flags;
	struct list_head *publ_list;
	struct tipc_uaddr ua;
	u32 bearer_id;

	if (likely(!flags)) {
		write_unlock_bh(&n->lock);
		return;
	}

	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
		   TIPC_LINK_STATE, n->addr, n->addr);
	sk.ref = n->link_id;
	sk.node = n->addr;
	bearer_id = n->link_id & 0xffff;
	publ_list = &n->publ_list;

	n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
			     TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

	write_unlock_bh(&n->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, sk.node, n->capabilities);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, sk.node, n->capabilities);

	if (flags & TIPC_NOTIFY_LINK_UP) {
		tipc_mon_peer_up(net, sk.node, bearer_id);
		tipc_nametbl_publish(net, &ua, &sk, sk.ref);
	}
	if (flags & TIPC_NOTIFY_LINK_DOWN) {
		tipc_mon_peer_down(net, sk.node, bearer_id);
		tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
	}
}

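/* tipc_node_assign_peer_net - detect a peer residing in another network
 * namespace of the same kernel by matching net id, node id and hash mix;
 * if found, messages to it can later be short-circuited via tipc_lxc_xmit()
 */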
static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
{
	int net_id = tipc_netid(n->net);
	struct tipc_net *tn_peer;
	struct net *tmp;
	u32 hash_chk;

	if (n->peer_net)
		return;

	for_each_net_rcu(tmp) {
		tn_peer = tipc_net(tmp);
		if (!tn_peer)
			continue;
		/* Integrity check: net id, node id and hash mix must all
		 * match before we trust that the node lives in this namespace
		 */
		if (tn_peer->net_id != net_id)
			continue;
		if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
			continue;
		hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
		if (hash_mixes ^ hash_chk)
			continue;
		n->peer_net = tmp;
		n->peer_hash_mix = hash_mixes;
		break;
	}
}

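/**
 * tipc_node_create - create a node object, or update an existing one
 * @net: the applicable net namespace
 * @addr: network address of the node
 * @peer_id: 128-bit id of the node
 * @capabilities: capability bitmap of the node
 * @hash_mixes: namespace hash of the node, or 0
 * @preliminary: true if the address is only a suggestion so far
 *
 * If the node already exists, its capabilities and, when it was
 * preliminary, its address are refreshed instead.
 *
 * Return: the node, or NULL if allocation or crypto start failed
 */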
struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
				   u16 capabilities, u32 hash_mixes,
				   bool preliminary)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n, *temp_node;
	struct tipc_link *l;
	unsigned long intv;
	int bearer_id;
	int i;

	spin_lock_bh(&tn->node_list_lock);
	n = tipc_node_find(net, addr) ?:
		tipc_node_find_by_id(net, peer_id);
	if (n) {
		if (!n->preliminary)
			goto update;
		if (preliminary)
			goto exit;
		/* A preliminary node becomes "real" now, refresh its data */
		tipc_node_write_lock(n);
		n->preliminary = false;
		n->addr = addr;
		hlist_del_rcu(&n->hash);
		hlist_add_head_rcu(&n->hash,
				   &tn->node_htable[tipc_hashfn(addr)]);
		list_del_rcu(&n->list);
		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
			if (n->addr < temp_node->addr)
				break;
		}
		list_add_tail_rcu(&n->list, &temp_node->list);
		tipc_node_write_unlock_fast(n);

update:
		if (n->peer_hash_mix ^ hash_mixes)
			tipc_node_assign_peer_net(n, hash_mixes);
		if (n->capabilities == capabilities)
			goto exit;
		/* Same node may come back with new capabilities */
		tipc_node_write_lock(n);
		n->capabilities = capabilities;
		for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
			l = n->links[bearer_id].link;
			if (l)
				tipc_link_update_caps(l, capabilities);
		}
		tipc_node_write_unlock_fast(n);

		/* Calculate cluster capabilities */
		tn->capabilities = TIPC_NODE_CAPABILITIES;
		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
			tn->capabilities &= temp_node->capabilities;
		}

		tipc_bcast_toggle_rcast(net,
					(tn->capabilities & TIPC_BCAST_RCAST));

		goto exit;
	}
	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	tipc_nodeid2string(n->peer_id_string, peer_id);
#ifdef CONFIG_TIPC_CRYPTO
	if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
		pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
		kfree(n);
		n = NULL;
		goto exit;
	}
#endif
	n->addr = addr;
	n->preliminary = preliminary;
	memcpy(&n->peer_id, peer_id, 16);
	n->net = net;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;
	/* Assign the kernel-local namespace if one exists */
	tipc_node_assign_peer_net(n, hash_mixes);
	n->capabilities = capabilities;
	kref_init(&n->kref);
	rwlock_init(&n->lock);
	INIT_HLIST_NODE(&n->hash);
	INIT_LIST_HEAD(&n->list);
	INIT_LIST_HEAD(&n->publ_list);
	INIT_LIST_HEAD(&n->conn_sks);
	skb_queue_head_init(&n->bc_entry.namedq);
	skb_queue_head_init(&n->bc_entry.inputq1);
	__skb_queue_head_init(&n->bc_entry.arrvq);
	skb_queue_head_init(&n->bc_entry.inputq2);
	for (i = 0; i < MAX_BEARERS; i++)
		spin_lock_init(&n->links[i].lock);
	n->state = SELF_DOWN_PEER_LEAVING;
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	n->signature = INVALID_NODE_SIG;
	n->active_links[0] = INVALID_BEARER_ID;
	n->active_links[1] = INVALID_BEARER_ID;
	n->bc_entry.link = NULL;
	tipc_node_get(n);
	timer_setup(&n->timer, tipc_node_timeout, 0);
	/* Start a slow timer anyway, crypto needs it */
	n->keepalive_intv = 10000;
	intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
	if (!mod_timer(&n->timer, intv))
		tipc_node_get(n);
	hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n->list, &temp_node->list);
	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
	trace_tipc_node_create(n, true, " ");
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n;
}

static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = tipc_link_tolerance(l);
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	/* Link with lowest tolerance determines timer interval */
	if (intv < n->keepalive_intv)
		n->keepalive_intv = intv;

	/* Ensure link's abort limit corresponds to current tolerance */
	tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}

static void tipc_node_delete_from_list(struct tipc_node *node)
{
#ifdef CONFIG_TIPC_CRYPTO
	tipc_crypto_key_flush(node->crypto_rx);
#endif
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	tipc_node_put(node);
}

static void tipc_node_delete(struct tipc_node *node)
{
	trace_tipc_node_delete(node, true, " ");
	tipc_node_delete_from_list(node);

	del_timer_sync(&node->timer);
	tipc_node_put(node);
}

void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
		tipc_node_delete(node);
	spin_unlock_bh(&tn->node_list_lock);
}

void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_add_tail(subscr, &n->publ_list);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_del_init(subscr);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_write_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_write_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_write_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_write_unlock(node);
	tipc_node_put(node);
}

static void tipc_node_clear_links(struct tipc_node *node)
{
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link_entry *le = &node->links[i];

		if (le->link) {
			kfree(le->link);
			le->link = NULL;
			node->link_cnt--;
		}
	}
}

/* tipc_node_cleanup - delete nodes that do not
 * have active links for NODE_CLEANUP_AFTER time
 */
static bool tipc_node_cleanup(struct tipc_node *peer)
{
	struct tipc_node *temp_node;
	struct tipc_net *tn = tipc_net(peer->net);
	bool deleted = false;

	/* If lock held by tipc_node_stop() the node will be deleted anyway */
	if (!spin_trylock_bh(&tn->node_list_lock))
		return false;

	tipc_node_write_lock(peer);

	if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
		tipc_node_clear_links(peer);
		tipc_node_delete_from_list(peer);
		deleted = true;
	}
	tipc_node_write_unlock(peer);

	if (!deleted) {
		spin_unlock_bh(&tn->node_list_lock);
		return deleted;
	}

	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(peer->net,
				(tn->capabilities & TIPC_BCAST_RCAST));
	spin_unlock_bh(&tn->node_list_lock);
	return deleted;
}

/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(struct timer_list *t)
{
	struct tipc_node *n = from_timer(n, t, timer);
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int remains = n->link_cnt;
	int bearer_id;
	int rc = 0;

	trace_tipc_node_timeout(n, false, " ");
	if (!node_is_up(n) && tipc_node_cleanup(n)) {
		/* Drop the timer's reference to the node */
		tipc_node_put(n);
		return;
	}

#ifdef CONFIG_TIPC_CRYPTO
	/* Take any crypto key related actions first */
	tipc_crypto_timeout(n->crypto_rx);
#endif
	__skb_queue_head_init(&xmitq);

	/* Reset the node interval to its large initial value (10 seconds);
	 * it is then recalculated below from the lowest link tolerance
	 */
	tipc_node_read_lock(n);
	n->keepalive_intv = 10000;
	tipc_node_read_unlock(n);
	for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		if (le->link) {
			spin_lock_bh(&le->lock);
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
			spin_unlock_bh(&le->lock);
			remains--;
		}
		tipc_node_read_unlock(n);
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}

/**
 * __tipc_node_link_up - handle addition of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer
 * @xmitq: queue for messages to be xmited on
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
				struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	if (!nl || tipc_link_is_up(nl))
		return;

	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
	if (!tipc_link_is_up(nl))
		return;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = tipc_link_id(nl);

	/* Leave room for tunnel header when returning 'mtu' to users: */
	n->links[bearer_id].mtu = tipc_link_mss(nl);

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

	pr_debug("Established link <%s> on network plane %c\n",
		 tipc_link_name(nl), tipc_link_plane(nl));
	trace_tipc_node_link_up(n, true, " ");

	/* Ensure that a STATE message goes first */
	tipc_link_build_state_msg(nl, xmitq);

	/* First link? => give it both slots */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
		n->action_flags |= TIPC_NOTIFY_NODE_UP;
		tipc_link_set_active(nl, true);
		tipc_bcast_add_peer(n->net, nl, xmitq);
		return;
	}

	/* Second link => redistribute slots */
	if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
		pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_link_set_active(nl, true);
		tipc_link_set_active(ol, false);
	} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
		tipc_link_set_active(nl, true);
		*slot1 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
	}

	/* Prepare synchronization with first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}

/**
 * tipc_node_link_up - handle addition of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer
 * @xmitq: queue for messages to be xmited on
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	struct tipc_media_addr *maddr;

	tipc_node_write_lock(n);
	__tipc_node_link_up(n, bearer_id, xmitq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
	tipc_node_write_unlock(n);
}

/**
 * tipc_node_link_failover() - start failover in case "half-failover"
 *
 * This function is only called in a very special situation where link
 * failover can be already started on peer node but not on this node.
 * This can happen when e.g.::
 *
 *	1. Both links <1A-2A>, <1B-2B> down
 *	2. Link endpoint 2A up, but 1A still down (e.g. due to network
 *	disturbance, wrong session, etc.)
 *	3. Link <1B-2B> up
 *	4. Link endpoint 2A down (e.g. due to link tolerance timeout)
 *	5. Node 2 starts failover onto link <1B-2B>
 *
 *	==> Node 1 never starts link/node failover!
 *
 * @n: tipc node structure
 * @l: link peer endpoint failingover (- can be NULL)
 * @tnl: tunnel link
 * @xmitq: queue for messages to be xmited on tnl link later
 */
static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
				    struct tipc_link *tnl,
				    struct sk_buff_head *xmitq)
{
	/* Avoid a "self-failover" that can never end */
	if (!tipc_link_is_up(tnl))
		return;

	/* Don't rush; the failed link may still be in the process of resetting */
	if (l && !tipc_link_is_reset(l))
		return;

	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);

	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_failover_prepare(l, tnl, xmitq);

	if (l)
		tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
}

/**
 * __tipc_node_link_down - handle loss of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer; may be changed to the failover bearer
 * @xmitq: queue for messages to be xmited on
 * @maddr: output media address of the bearer
 */
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr)
{
	struct tipc_link_entry *le = &n->links[*bearer_id];
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0, prio;
	struct tipc_link *l, *_l, *tnl;

	l = n->links[*bearer_id].link;
	if (!l || tipc_link_is_reset(l))
		return;

	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = tipc_link_id(l);

	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 tipc_link_name(l), tipc_link_plane(l));

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		prio = tipc_link_prio(_l);
		if (prio < highest)
			continue;
		if (prio > highest) {
			highest = prio;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	if (!node_is_up(n)) {
		if (tipc_link_peer_is_down(l))
			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		tipc_link_reset(l);
		tipc_link_build_reset_msg(l, xmitq);
		*maddr = &n->links[*bearer_id].maddr;
		node_lost_contact(n, &le->inputq);
		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
		return;
	}
	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);

	/* There is still a working link => initiate failover */
	*bearer_id = n->active_links[0];
	tnl = n->links[*bearer_id].link;
	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
	tipc_link_reset(l);
	tipc_link_fsm_evt(l, LINK_RESET_EVT);
	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
	*maddr = &n->links[*bearer_id].maddr;
}

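/* tipc_node_link_down - take the link on the given bearer down under the
 * node write lock, optionally deleting it, then flush the resulting
 * protocol messages and deliver any pending user messages
 */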
static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct tipc_media_addr *maddr = NULL;
	struct tipc_link *l = le->link;
	int old_bearer_id = bearer_id;
	struct sk_buff_head xmitq;

	if (!l)
		return;

	__skb_queue_head_init(&xmitq);

	tipc_node_write_lock(n);
	if (!tipc_link_is_establishing(l)) {
		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
	} else {
		/* Defuse pending tipc_node_link_up() */
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
	}
	if (delete) {
		kfree(l);
		le->link = NULL;
		n->link_cnt--;
	}
	trace_tipc_node_link_down(n, true, "node link down or deleted!");
	tipc_node_write_unlock(n);
	if (delete)
		tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
	tipc_sk_rcv(n->net, &le->inputq);
}

static bool node_is_up(struct tipc_node *n)
{
	return n->active_links[0] != INVALID_BEARER_ID;
}

bool tipc_node_is_up(struct net *net, u32 addr)
{
	struct tipc_node *n;
	bool retval = false;

	if (in_own_node(net, addr))
		return true;

	n = tipc_node_find(net, addr);
	if (!n)
		return false;
	retval = node_is_up(n);
	tipc_node_put(n);
	return retval;
}

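/* tipc_node_suggest_addr - suggest a free node address by XOR-ing the
 * requested one with this net's random value and probing linearly
 * until an unused address is found
 */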
static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	addr ^= tipc_net(net)->random;
	while ((n = tipc_node_find(net, addr))) {
		tipc_node_put(n);
		addr++;
	}
	return addr;
}

/* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
 * Returns suggested address if any, otherwise 0
 */
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool preliminary;
	u32 sugg_addr;

	/* Suggest new address if some other peer is using this one */
	n = tipc_node_find(net, addr);
	if (n) {
		if (!memcmp(n->peer_id, id, NODE_ID_LEN))
			addr = 0;
		tipc_node_put(n);
		if (!addr)
			return 0;
		return tipc_node_suggest_addr(net, addr);
	}

	/* Suggest previously used address if peer is known */
	n = tipc_node_find_by_id(net, id);
	if (n) {
		sugg_addr = n->addr;
		preliminary = n->preliminary;
		tipc_node_put(n);
		if (!preliminary)
			return sugg_addr;
	}

	/* Even this node may be in conflict */
	if (tn->trial_addr == addr)
		return tipc_node_suggest_addr(net, addr);

	return 0;
}

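/* tipc_node_check_dest - validate a discovery message against known state.
 * Compares the peer's signature and media address with what is recorded
 * for the node/bearer and decides whether to respond to the discoverer
 * (*respond) and whether a duplicate address was detected (*dupl_addr).
 * Creates the node and/or the link if they do not yet exist.
 */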
void tipc_node_check_dest(struct net *net, u32 addr,
			  u8 *peer_id, struct tipc_bearer *b,
			  u16 capabilities, u32 signature, u32 hash_mixes,
			  struct tipc_media_addr *maddr,
			  bool *respond, bool *dupl_addr)
{
	struct tipc_node *n;
	struct tipc_link *l, *snd_l;
	struct tipc_link_entry *le;
	bool addr_match = false;
	bool sign_match = false;
	bool link_up = false;
	bool accept_addr = false;
	bool reset = true;
	char *if_name;
	unsigned long intv;
	u16 session;

	*dupl_addr = false;
	*respond = false;

	n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
			     false);
	if (!n)
		return;

	tipc_node_write_lock(n);
	if (unlikely(!n->bc_entry.link)) {
		snd_l = tipc_bc_sndlink(net);
		if (!tipc_link_bc_create(net, tipc_own_addr(net),
					 addr, peer_id, U16_MAX,
					 tipc_link_min_win(snd_l),
					 tipc_link_max_win(snd_l),
					 n->capabilities,
					 &n->bc_entry.inputq1,
					 &n->bc_entry.namedq, snd_l,
					 &n->bc_entry.link)) {
			pr_warn("Broadcast rcv link creation failed, no mem\n");
			tipc_node_write_unlock_fast(n);
			tipc_node_put(n);
			return;
		}
	}

	le = &n->links[b->identity];

	/* Prepare to validate requesting node's signature and media address */
	l = le->link;
	link_up = l && tipc_link_is_up(l);
	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
	sign_match = (signature == n->signature);

	/* These three flags give us eight permutations: */

	if (sign_match && addr_match && link_up) {
		/* All is fine. Do nothing. */
		reset = false;
		/* Peer node is not a container/local namespace */
		if (!n->peer_hash_mix)
			n->peer_hash_mix = hash_mixes;
	} else if (sign_match && addr_match && !link_up) {
		/* Respond. The link will come up in due time */
		*respond = true;
	} else if (sign_match && !addr_match && link_up) {
		/* Peer has changed i/f address without rebooting.
		 * If so, the link will reset soon, and the next
		 * discovery will be accepted. So we can ignore it.
		 * It may also be a cloned or malicious peer having
		 * chosen the same node address and signature as an
		 * existing one.
		 * Ignore requests until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (sign_match && !addr_match && !link_up) {
		/* Peer link has changed i/f address without rebooting.
		 * It may also be a cloned or malicious peer; we can't
		 * distinguish between the two.
		 * The signature is correct, so we must accept.
		 */
		accept_addr = true;
		*respond = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Peer node rebooted. Two possibilities:
		 *  - Delayed re-discovery; this link endpoint has already
		 *    reset and re-established contact with the peer, before
		 *    receiving a discovery message from that node.
		 *    (The peer happened to receive one from this node first).
		 *  - The peer came back so fast that our side has not
		 *    discovered it yet. Probing from this side will soon
		 *    reset the link, since there can be no working link
		 *    endpoint at the peer end, and the link will re-establish.
		 *  Accept the signature, since it comes from a known peer.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/*  The peer node has rebooted.
		 *  Accept signature, since it is a known peer.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Peer rebooted with new address, or a new/duplicate peer.
		 * Ignore until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with new address, or it is a new peer.
		 * Accept signature and address.
		 */
		n->signature = signature;
		accept_addr = true;
		*respond = true;
	}

	if (!accept_addr)
		goto exit;

	/* Now create new link if not already existing */
	if (!l) {
		if (n->link_cnt == 2)
			goto exit;

		if_name = strchr(b->name, ':') + 1;
		get_random_bytes(&session, sizeof(u16));
		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
				      b->net_plane, b->mtu, b->priority,
				      b->min_win, b->max_win, session,
				      tipc_own_addr(net), addr, peer_id,
				      n->capabilities,
				      tipc_bc_sndlink(n->net), n->bc_entry.link,
				      &le->inputq,
				      &n->bc_entry.namedq, &l)) {
			*respond = false;
			goto exit;
		}
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		if (n->state == NODE_FAILINGOVER)
			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
		le->link = l;
		n->link_cnt++;
		tipc_node_calculate_timer(n, l);
		if (n->link_cnt == 1) {
			intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
			if (!mod_timer(&n->timer, intv))
				tipc_node_get(n);
		}
	}
	memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
	tipc_node_write_unlock(n);
	if (reset && l && !tipc_link_is_reset(l))
		tipc_node_link_down(n, b->identity, false);
	tipc_node_put(n);
}

void tipc_node_delete_links(struct net *net, int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_link_down(n, bearer_id, true);
	}
	rcu_read_unlock();
}

static void tipc_node_reset_links(struct tipc_node *n)
{
	int i;

	pr_warn("Resetting all links to %x\n", n->addr);

	trace_tipc_node_reset_links(n, true, " ");
	for (i = 0; i < MAX_BEARERS; i++) {
		tipc_node_link_down(n, i, false);
	}
}

/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
	n->state = state;
	return;

illegal_evt:
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
}

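/* node_lost_contact - handle loss of the last link to a node: clean up
 * broadcast state, abort any ongoing link failover, flag pending
 * publication withdrawals and reject all sockets connected to the node
 */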
static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %x\n", n->addr);
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	trace_tipc_node_lost_contact(n, true, " ");

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);
	skb_queue_purge(&n->bc_entry.namedq);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;
	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}

/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of @linkname output buffer
 *
 * Return: 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_read_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		strncpy(linkname, tipc_link_name(link), len);
		err = 0;
	}
	tipc_node_read_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

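/* tipc_lxc_xmit - short-circuit delivery to a peer node residing in
 * another network namespace of the same kernel (e.g. a container),
 * bypassing the bearers entirely. Messages that cannot be delivered
 * this way are left on 'list' for regular link transmission.
 */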
static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	struct sk_buff_head inputq;

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (msg_connected(hdr) || msg_named(hdr) ||
		    msg_direct(hdr)) {
			tipc_loopback_trace(peer_net, list);
			spin_lock_init(&list->lock);
			tipc_sk_rcv(peer_net, list);
			return;
		}
		if (msg_mcast(hdr)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
			return;
		}
		return;
	case MSG_FRAGMENTER:
		if (tipc_msg_assemble(list)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
		}
		return;
	case GROUP_PROTOCOL:
	case CONN_MANAGER:
		tipc_loopback_trace(peer_net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(peer_net, list);
		return;
	case LINK_PROTOCOL:
	case NAME_DISTRIBUTOR:
	case TUNNEL_PROTOCOL:
	case BCAST_PROTOCOL:
		return;
	default:
		return;
	}
}

/**
 * tipc_node_xmit() - general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain.
 * Return: 0 on success; otherwise -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE or
 * -ENOBUFS
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link_entry *le = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	bool node_up = false;
	int bearer_id;
	int rc;

	if (in_own_node(net, dnode)) {
		tipc_loopback_trace(net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(net, list);
		return 0;
	}

	n = tipc_node_find(net, dnode);
	if (unlikely(!n)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	tipc_node_read_lock(n);
	node_up = node_is_up(n);
	if (node_up && n->peer_net && check_net(n->peer_net)) {
		/* Try direct delivery to a peer in the same kernel (container) */
		tipc_lxc_xmit(n->peer_net, list);
		if (likely(skb_queue_empty(list))) {
			tipc_node_read_unlock(n);
			tipc_node_put(n);
			return 0;
		}
	}

	bearer_id = n->active_links[selector & 1];
	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
		tipc_node_read_unlock(n);
		tipc_node_put(n);
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	__skb_queue_head_init(&xmitq);
	le = &n->links[bearer_id];
	spin_lock_bh(&le->lock);
	rc = tipc_link_xmit(le->link, list, &xmitq);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(n);

	if (unlikely(rc == -ENOBUFS))
		tipc_node_link_down(n, bearer_id, false);
	else
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

	tipc_node_put(n);

	return rc;
}

/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	__skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	tipc_node_xmit(net, &head, dnode, selector);
	return 0;
}

/* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
 */
int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	u32 selector, dnode;

	while ((skb = __skb_dequeue(xmitq))) {
		selector = msg_origport(buf_msg(skb));
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, selector);
	}
	return 0;
}

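/* tipc_node_broadcast - send skb to all cluster nodes, using the
 * broadcast link if every node supports it, otherwise by replicating
 * the buffer and unicasting one copy per reachable node (replicast)
 */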
1774  void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests)
1775  {
1776  	struct sk_buff_head xmitq;
1777  	struct sk_buff *txskb;
1778  	struct tipc_node *n;
1779  	u16 dummy;
1780  	u32 dst;
1781  
1782  	/* Use broadcast if all nodes support it */
1783  	if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) {
1784  		__skb_queue_head_init(&xmitq);
1785  		__skb_queue_tail(&xmitq, skb);
1786  		tipc_bcast_xmit(net, &xmitq, &dummy);
1787  		return;
1788  	}
1789  
1790  	/* Otherwise use legacy replicast method */
1791  	rcu_read_lock();
1792  	list_for_each_entry_rcu(n, tipc_nodes(net), list) {
1793  		dst = n->addr;
1794  		if (in_own_node(net, dst))
1795  			continue;
1796  		if (!node_is_up(n))
1797  			continue;
1798  		txskb = pskb_copy(skb, GFP_ATOMIC);
1799  		if (!txskb)
1800  			break;
1801  		msg_set_destnode(buf_msg(txskb), dst);
1802  		tipc_node_xmit_skb(net, txskb, dst, 0);
1803  	}
1804  	rcu_read_unlock();
1805  	kfree_skb(skb);
1806  }
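/*
 * The bcast/rcast decision above follows tipc_bcast_get_mode(). As an
 * assumed example, the iproute2 tipc tool can steer this mode with a
 * command along the lines of:
 *
 *	tipc link set broadcast BROADCAST   (force true broadcast)
 *	tipc link set broadcast REPLICAST   (force replicast)
 *	tipc link set broadcast AUTOSELECT  (let TIPC choose)
 *
 * Exact syntax depends on the tool version; treat this as a sketch.
 */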
1807  
1808  static void tipc_node_mcast_rcv(struct tipc_node *n)
1809  {
1810  	struct tipc_bclink_entry *be = &n->bc_entry;
1811  
1812  	/* 'arrvq' is protected by inputq2's lock, so take that lock first */
1813  	spin_lock_bh(&be->inputq2.lock);
1814  	spin_lock_bh(&be->inputq1.lock);
1815  	skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
1816  	spin_unlock_bh(&be->inputq1.lock);
1817  	spin_unlock_bh(&be->inputq2.lock);
1818  	tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
1819  }
1820  
1821  static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
1822  				  int bearer_id, struct sk_buff_head *xmitq)
1823  {
1824  	struct tipc_link *ucl;
1825  	int rc;
1826  
1827  	rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);
1828  
1829  	if (rc & TIPC_LINK_DOWN_EVT) {
1830  		tipc_node_reset_links(n);
1831  		return;
1832  	}
1833  
1834  	if (!(rc & TIPC_LINK_SND_STATE))
1835  		return;
1836  
1837  	/* If probe message, a STATE response will be sent anyway */
1838  	if (msg_probe(hdr))
1839  		return;
1840  
1841  	/* Produce a STATE message carrying broadcast NACK */
1842  	tipc_node_read_lock(n);
1843  	ucl = n->links[bearer_id].link;
1844  	if (ucl)
1845  		tipc_link_build_state_msg(ucl, xmitq);
1846  	tipc_node_read_unlock(n);
1847  }
1848  
1849  /**
1850   * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
1851   * @net: the applicable net namespace
1852   * @skb: TIPC packet
1853   * @bearer_id: id of the bearer the message arrived on
1854   *
1855   * Invoked with no locks held.
1856   */
1857  static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
1858  {
1859  	int rc;
1860  	struct sk_buff_head xmitq;
1861  	struct tipc_bclink_entry *be;
1862  	struct tipc_link_entry *le;
1863  	struct tipc_msg *hdr = buf_msg(skb);
1864  	int usr = msg_user(hdr);
1865  	u32 dnode = msg_destnode(hdr);
1866  	struct tipc_node *n;
1867  
1868  	__skb_queue_head_init(&xmitq);
1869  
1870  	/* If NACK for another node, let that node's rcv link peek into it */
1871  	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
1872  		n = tipc_node_find(net, dnode);
1873  	else
1874  		n = tipc_node_find(net, msg_prevnode(hdr));
1875  	if (!n) {
1876  		kfree_skb(skb);
1877  		return;
1878  	}
1879  	be = &n->bc_entry;
1880  	le = &n->links[bearer_id];
1881  
1882  	rc = tipc_bcast_rcv(net, be->link, skb);
1883  
1884  	/* Broadcast ACKs are sent on a unicast link */
1885  	if (rc & TIPC_LINK_SND_STATE) {
1886  		tipc_node_read_lock(n);
1887  		tipc_link_build_state_msg(le->link, &xmitq);
1888  		tipc_node_read_unlock(n);
1889  	}
1890  
1891  	if (!skb_queue_empty(&xmitq))
1892  		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1893  
1894  	if (!skb_queue_empty(&be->inputq1))
1895  		tipc_node_mcast_rcv(n);
1896  
1897  	/* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
1898  	if (!skb_queue_empty(&n->bc_entry.namedq))
1899  		tipc_named_rcv(net, &n->bc_entry.namedq,
1900  			       &n->bc_entry.named_rcv_nxt,
1901  			       &n->bc_entry.named_open);
1902  
1903  	/* If reassembly or retransmission failure => reset all links to peer */
1904  	if (rc & TIPC_LINK_DOWN_EVT)
1905  		tipc_node_reset_links(n);
1906  
1907  	tipc_node_put(n);
1908  }
1909  
1910  /**
1911   * tipc_node_check_state - check and if necessary update node state
1912   * @n: target tipc_node
1913   * @skb: TIPC packet
1914   * @bearer_id: identity of bearer delivering the packet
1915   * @xmitq: queue for messages to be transmitted on
1916   * Return: true if state and msg are ok, otherwise false
1917   */
1918  static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1919  				  int bearer_id, struct sk_buff_head *xmitq)
1920  {
1921  	struct tipc_msg *hdr = buf_msg(skb);
1922  	int usr = msg_user(hdr);
1923  	int mtyp = msg_type(hdr);
1924  	u16 oseqno = msg_seqno(hdr);
1925  	u16 exp_pkts = msg_msgcnt(hdr);
1926  	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
1927  	int state = n->state;
1928  	struct tipc_link *l, *tnl, *pl = NULL;
1929  	struct tipc_media_addr *maddr;
1930  	int pb_id;
1931  
1932  	if (trace_tipc_node_check_state_enabled()) {
1933  		trace_tipc_skb_dump(skb, false, "skb for node state check");
1934  		trace_tipc_node_check_state(n, true, " ");
1935  	}
1936  	l = n->links[bearer_id].link;
1937  	if (!l)
1938  		return false;
1939  	rcv_nxt = tipc_link_rcv_nxt(l);
1940  
1942  	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
1943  		return true;
1944  
1945  	/* Find parallel link, if any */
1946  	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
1947  		if ((pb_id != bearer_id) && n->links[pb_id].link) {
1948  			pl = n->links[pb_id].link;
1949  			break;
1950  		}
1951  	}
1952  
1953  	if (!tipc_link_validate_msg(l, hdr)) {
1954  		trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
1955  		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
1956  		return false;
1957  	}
1958  
1959  	/* Check and update node accessibility if applicable */
1960  	if (state == SELF_UP_PEER_COMING) {
1961  		if (!tipc_link_is_up(l))
1962  			return true;
1963  		if (!msg_peer_link_is_up(hdr))
1964  			return true;
1965  		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
1966  	}
1967  
1968  	if (state == SELF_DOWN_PEER_LEAVING) {
1969  		if (msg_peer_node_is_up(hdr))
1970  			return false;
1971  		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
1972  		return true;
1973  	}
1974  
1975  	if (state == SELF_LEAVING_PEER_DOWN)
1976  		return false;
1977  
1978  	/* Ignore duplicate packets */
1979  	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
1980  		return true;
1981  
1982  	/* Initiate or update failover mode if applicable */
1983  	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
1984  		syncpt = oseqno + exp_pkts - 1;
1985  		if (pl && !tipc_link_is_reset(pl)) {
1986  			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
1987  			trace_tipc_node_link_down(n, true,
1988  						  "node link down <- failover!");
1989  			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
1990  							tipc_link_inputq(l));
1991  		}
1992  
1993  		/* If parallel link was already down, and this happened before
1994  		 * the tunnel link came up, node failover was never started.
1995  		 * Ensure that a FAILOVER_MSG is sent to get peer out of
1996  		 * NODE_FAILINGOVER state; this node must also accept
1997  		 * TUNNEL_MSGs from the peer.
1998  		 */
1999  		if (n->state != NODE_FAILINGOVER)
2000  			tipc_node_link_failover(n, pl, l, xmitq);
2001  
2002  		/* If pkts arrive out of order, use lowest calculated syncpt */
2003  		if (less(syncpt, n->sync_point))
2004  			n->sync_point = syncpt;
2005  	}
2006  
2007  	/* Open parallel link when tunnel link reaches synch point */
2008  	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
2009  		if (!more(rcv_nxt, n->sync_point))
2010  			return true;
2011  		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
2012  		if (pl)
2013  			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
2014  		return true;
2015  	}
2016  
2017  	/* No syncing needed if only one link */
2018  	if (!pl || !tipc_link_is_up(pl))
2019  		return true;
2020  
2021  	/* Initiate synch mode if applicable */
2022  	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
2023  		if (n->capabilities & TIPC_TUNNEL_ENHANCED)
2024  			syncpt = msg_syncpt(hdr);
2025  		else
2026  			syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
2027  		if (!tipc_link_is_up(l))
2028  			__tipc_node_link_up(n, bearer_id, xmitq);
2029  		if (n->state == SELF_UP_PEER_UP) {
2030  			n->sync_point = syncpt;
2031  			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
2032  			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
2033  		}
2034  	}
2035  
2036  	/* Open tunnel link when parallel link reaches synch point */
2037  	if (n->state == NODE_SYNCHING) {
2038  		if (tipc_link_is_synching(l)) {
2039  			tnl = l;
2040  		} else {
2041  			tnl = pl;
2042  			pl = l;
2043  		}
2044  		inputq_len = skb_queue_len(tipc_link_inputq(pl));
2045  		dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
2046  		if (more(dlv_nxt, n->sync_point)) {
2047  			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
2048  			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
2049  			return true;
2050  		}
2051  		if (l == pl)
2052  			return true;
2053  		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
2054  			return true;
2055  		if (usr == LINK_PROTOCOL)
2056  			return true;
2057  		return false;
2058  	}
2059  	return true;
2060  }
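/*
 * Note on the syncpt arithmetic above: sequence numbers are u16 and the
 * less()/more() helpers (from msg.h) compare them circularly mod 2^16, so
 * wraparound is harmless. A worked example, assuming less(a, b) means
 * (u16)(b - a) lies in (0, 32768):
 *
 *	u16 syncpt = 65530, rcv_nxt = 5;
 *
 *	(u16)(rcv_nxt - syncpt) == 11, so less(syncpt, rcv_nxt) is true:
 *	seqno 65530 is "before" seqno 5 after the counter wraps.
 */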
2061  
2062  /**
2063   * tipc_rcv - process TIPC packets/messages arriving from off-node
2064   * @net: the applicable net namespace
2065   * @skb: TIPC packet
2066   * @b: pointer to the bearer the message arrived on
2067   *
2068   * Invoked with no locks held. Bearer pointer must point to a valid bearer
2069   * structure (i.e. cannot be NULL), but bearer can be inactive.
2070   */
2071  void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
2072  {
2073  	struct sk_buff_head xmitq;
2074  	struct tipc_link_entry *le;
2075  	struct tipc_msg *hdr;
2076  	struct tipc_node *n;
2077  	int bearer_id = b->identity;
2078  	u32 self = tipc_own_addr(net);
2079  	int usr, rc = 0;
2080  	u16 bc_ack;
2081  #ifdef CONFIG_TIPC_CRYPTO
2082  	struct tipc_ehdr *ehdr;
2083  
2084  	/* Check if message must be decrypted first */
2085  	if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
2086  		goto rcv;
2087  
2088  	ehdr = (struct tipc_ehdr *)skb->data;
2089  	if (likely(ehdr->user != LINK_CONFIG)) {
2090  		n = tipc_node_find(net, ntohl(ehdr->addr));
2091  		if (unlikely(!n))
2092  			goto discard;
2093  	} else {
2094  		n = tipc_node_find_by_id(net, ehdr->id);
2095  	}
2096  	tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
2097  	if (!skb)
2098  		return;
2099  
2100  rcv:
2101  #endif
2102  	/* Ensure message is well-formed before touching the header */
2103  	if (unlikely(!tipc_msg_validate(&skb)))
2104  		goto discard;
2105  	__skb_queue_head_init(&xmitq);
2106  	hdr = buf_msg(skb);
2107  	usr = msg_user(hdr);
2108  	bc_ack = msg_bcast_ack(hdr);
2109  
2110  	/* Handle arrival of discovery or broadcast packet */
2111  	if (unlikely(msg_non_seq(hdr))) {
2112  		if (unlikely(usr == LINK_CONFIG))
2113  			return tipc_disc_rcv(net, skb, b);
2114  		else
2115  			return tipc_node_bc_rcv(net, skb, bearer_id);
2116  	}
2117  
2118  	/* Discard unicast link messages destined for another node */
2119  	if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
2120  		goto discard;
2121  
2122  	/* Locate neighboring node that sent packet */
2123  	n = tipc_node_find(net, msg_prevnode(hdr));
2124  	if (unlikely(!n))
2125  		goto discard;
2126  	le = &n->links[bearer_id];
2127  
2128  	/* Ensure broadcast reception is in synch with peer's send state */
2129  	if (unlikely(usr == LINK_PROTOCOL)) {
2130  		if (unlikely(skb_linearize(skb))) {
2131  			tipc_node_put(n);
2132  			goto discard;
2133  		}
2134  		hdr = buf_msg(skb);
2135  		tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
2136  	} else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) {
2137  		tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
2138  	}
2139  
2140  	/* Receive packet directly if conditions permit */
2141  	tipc_node_read_lock(n);
2142  	if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
2143  		spin_lock_bh(&le->lock);
2144  		if (le->link) {
2145  			rc = tipc_link_rcv(le->link, skb, &xmitq);
2146  			skb = NULL;
2147  		}
2148  		spin_unlock_bh(&le->lock);
2149  	}
2150  	tipc_node_read_unlock(n);
2151  
2152  	/* Check/update node state before receiving */
2153  	if (unlikely(skb)) {
2154  		if (unlikely(skb_linearize(skb)))
2155  			goto out_node_put;
2156  		tipc_node_write_lock(n);
2157  		if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
2158  			if (le->link) {
2159  				rc = tipc_link_rcv(le->link, skb, &xmitq);
2160  				skb = NULL;
2161  			}
2162  		}
2163  		tipc_node_write_unlock(n);
2164  	}
2165  
2166  	if (unlikely(rc & TIPC_LINK_UP_EVT))
2167  		tipc_node_link_up(n, bearer_id, &xmitq);
2168  
2169  	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
2170  		tipc_node_link_down(n, bearer_id, false);
2171  
2172  	if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
2173  		tipc_named_rcv(net, &n->bc_entry.namedq,
2174  			       &n->bc_entry.named_rcv_nxt,
2175  			       &n->bc_entry.named_open);
2176  
2177  	if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
2178  		tipc_node_mcast_rcv(n);
2179  
2180  	if (!skb_queue_empty(&le->inputq))
2181  		tipc_sk_rcv(net, &le->inputq);
2182  
2183  	if (!skb_queue_empty(&xmitq))
2184  		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
2185  
2186  out_node_put:
2187  	tipc_node_put(n);
2188  discard:
2189  	kfree_skb(skb);
2190  }
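/*
 * Entry-path sketch (illustrative): tipc_rcv() is normally invoked from a
 * media layer receive handler with no locks held except rcu_read_lock().
 * Roughly, an L2 handler such as tipc_l2_rcv_msg() in bearer.c does:
 *
 *	rcu_read_lock();
 *	b = rcu_dereference(dev->tipc_ptr);
 *	if (likely(b && test_bit(0, &b->up) &&
 *		   skb->pkt_type <= PACKET_MULTICAST)) {
 *		skb_mark_not_on_list(skb);
 *		tipc_rcv(dev_net(b->pt.dev), skb, b);
 *		rcu_read_unlock();
 *		return NET_RX_SUCCESS;
 *	}
 *	rcu_read_unlock();
 *	kfree_skb(skb);
 *	return NET_RX_DROP;
 *
 * This is a simplified paraphrase, not a verbatim copy of that handler.
 */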
2191  
2192  void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
2193  			      int prop)
2194  {
2195  	struct tipc_net *tn = tipc_net(net);
2196  	int bearer_id = b->identity;
2197  	struct sk_buff_head xmitq;
2198  	struct tipc_link_entry *e;
2199  	struct tipc_node *n;
2200  
2201  	__skb_queue_head_init(&xmitq);
2202  
2203  	rcu_read_lock();
2204  
2205  	list_for_each_entry_rcu(n, &tn->node_list, list) {
2206  		tipc_node_write_lock(n);
2207  		e = &n->links[bearer_id];
2208  		if (e->link) {
2209  			if (prop == TIPC_NLA_PROP_TOL)
2210  				tipc_link_set_tolerance(e->link, b->tolerance,
2211  							&xmitq);
2212  			else if (prop == TIPC_NLA_PROP_MTU)
2213  				tipc_link_set_mtu(e->link, b->mtu);
2214  
2215  			/* Update MTU for node link entry */
2216  			e->mtu = tipc_link_mss(e->link);
2217  		}
2218  
2219  		tipc_node_write_unlock(n);
2220  		tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
2221  	}
2222  
2223  	rcu_read_unlock();
2224  }
2225  
2226  int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
2227  {
2228  	struct net *net = sock_net(skb->sk);
2229  	struct tipc_net *tn = net_generic(net, tipc_net_id);
2230  	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
2231  	struct tipc_node *peer, *temp_node;
2232  	u8 node_id[NODE_ID_LEN];
2233  	u64 *w0 = (u64 *)&node_id[0];
2234  	u64 *w1 = (u64 *)&node_id[8];
2235  	u32 addr;
2236  	int err;
2237  
2238  	/* We identify the peer by the TIPC_NLA_NET attributes (addr or node id) */
2239  	if (!info->attrs[TIPC_NLA_NET])
2240  		return -EINVAL;
2241  
2242  	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
2243  					  info->attrs[TIPC_NLA_NET],
2244  					  tipc_nl_net_policy, info->extack);
2245  	if (err)
2246  		return err;
2247  
2248  	/* attrs[TIPC_NLA_NET_NODEID] and attrs[TIPC_NLA_NET_ADDR] are
2249  	 * mutually exclusive cases
2250  	 */
2251  	if (attrs[TIPC_NLA_NET_ADDR]) {
2252  		addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
2253  		if (!addr)
2254  			return -EINVAL;
2255  	}
2256  
2257  	if (attrs[TIPC_NLA_NET_NODEID]) {
2258  		if (!attrs[TIPC_NLA_NET_NODEID_W1])
2259  			return -EINVAL;
2260  		*w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
2261  		*w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
2262  		addr = hash128to32(node_id);
2263  	}
2264  
2265  	if (in_own_node(net, addr))
2266  		return -ENOTSUPP;
2267  
2268  	spin_lock_bh(&tn->node_list_lock);
2269  	peer = tipc_node_find(net, addr);
2270  	if (!peer) {
2271  		spin_unlock_bh(&tn->node_list_lock);
2272  		return -ENXIO;
2273  	}
2274  
2275  	tipc_node_write_lock(peer);
2276  	if (peer->state != SELF_DOWN_PEER_DOWN &&
2277  	    peer->state != SELF_DOWN_PEER_LEAVING) {
2278  		tipc_node_write_unlock(peer);
2279  		err = -EBUSY;
2280  		goto err_out;
2281  	}
2282  
2283  	tipc_node_clear_links(peer);
2284  	tipc_node_write_unlock(peer);
2285  	tipc_node_delete(peer);
2286  
2287  	/* Recalculate cluster capabilities */
2288  	tn->capabilities = TIPC_NODE_CAPABILITIES;
2289  	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
2290  		tn->capabilities &= temp_node->capabilities;
2291  	}
2292  	tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
2293  	err = 0;
2294  err_out:
2295  	tipc_node_put(peer);
2296  	spin_unlock_bh(&tn->node_list_lock);
2297  
2298  	return err;
2299  }
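/*
 * Userspace sketch: this handler typically backs a management command such
 * as "tipc peer remove address <z.c.n>" from the iproute2 tipc tool
 * (assumed syntax; it varies between tool versions). Removal is refused
 * with -EBUSY unless the connection to the peer is already down.
 */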
2300  
2301  int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
2302  {
2303  	int err;
2304  	struct net *net = sock_net(skb->sk);
2305  	struct tipc_net *tn = net_generic(net, tipc_net_id);
2306  	int done = cb->args[0];
2307  	int last_addr = cb->args[1];
2308  	struct tipc_node *node;
2309  	struct tipc_nl_msg msg;
2310  
2311  	if (done)
2312  		return 0;
2313  
2314  	msg.skb = skb;
2315  	msg.portid = NETLINK_CB(cb->skb).portid;
2316  	msg.seq = cb->nlh->nlmsg_seq;
2317  
2318  	rcu_read_lock();
2319  	if (last_addr) {
2320  		node = tipc_node_find(net, last_addr);
2321  		if (!node) {
2322  			rcu_read_unlock();
2323  			/* We never set seq or call nl_dump_check_consistent(),
2324  			 * which means that setting prev_seq here will cause the
2325  			 * consistency check to fail in the netlink callback
2326  			 * handler, resulting in the NLMSG_DONE message having
2327  			 * the NLM_F_DUMP_INTR flag set if the node state
2328  			 * changed while we released the lock.
2329  			 */
2330  			cb->prev_seq = 1;
2331  			return -EPIPE;
2332  		}
2333  		tipc_node_put(node);
2334  	}
2335  
2336  	list_for_each_entry_rcu(node, &tn->node_list, list) {
2337  		if (node->preliminary)
2338  			continue;
2339  		if (last_addr) {
2340  			if (node->addr == last_addr)
2341  				last_addr = 0;
2342  			else
2343  				continue;
2344  		}
2345  
2346  		tipc_node_read_lock(node);
2347  		err = __tipc_nl_add_node(&msg, node);
2348  		if (err) {
2349  			last_addr = node->addr;
2350  			tipc_node_read_unlock(node);
2351  			goto out;
2352  		}
2353  
2354  		tipc_node_read_unlock(node);
2355  	}
2356  	done = 1;
2357  out:
2358  	cb->args[0] = done;
2359  	cb->args[1] = last_addr;
2360  	rcu_read_unlock();
2361  
2362  	return skb->len;
2363  }
2364  
2365  /* tipc_node_find_by_name - locate owner node of link by link's name
2366   * @net: the applicable net namespace
2367   * @link_name: pointer to link name string
2368   * @bearer_id: pointer to index in 'node->links' array where the link was found.
2369   *
2370   * Returns pointer to node owning the link, or NULL if no matching link is found.
2371   */
2372  static struct tipc_node *tipc_node_find_by_name(struct net *net,
2373  						const char *link_name,
2374  						unsigned int *bearer_id)
2375  {
2376  	struct tipc_net *tn = net_generic(net, tipc_net_id);
2377  	struct tipc_link *l;
2378  	struct tipc_node *n;
2379  	struct tipc_node *found_node = NULL;
2380  	int i;
2381  
2382  	*bearer_id = 0;
2383  	rcu_read_lock();
2384  	list_for_each_entry_rcu(n, &tn->node_list, list) {
2385  		tipc_node_read_lock(n);
2386  		for (i = 0; i < MAX_BEARERS; i++) {
2387  			l = n->links[i].link;
2388  			if (l && !strcmp(tipc_link_name(l), link_name)) {
2389  				*bearer_id = i;
2390  				found_node = n;
2391  				break;
2392  			}
2393  		}
2394  		tipc_node_read_unlock(n);
2395  		if (found_node)
2396  			break;
2397  	}
2398  	rcu_read_unlock();
2399  
2400  	return found_node;
2401  }
2402  
2403  int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
2404  {
2405  	int err;
2406  	int res = 0;
2407  	int bearer_id;
2408  	char *name;
2409  	struct tipc_link *link;
2410  	struct tipc_node *node;
2411  	struct sk_buff_head xmitq;
2412  	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2413  	struct net *net = sock_net(skb->sk);
2414  
2415  	__skb_queue_head_init(&xmitq);
2416  
2417  	if (!info->attrs[TIPC_NLA_LINK])
2418  		return -EINVAL;
2419  
2420  	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2421  					  info->attrs[TIPC_NLA_LINK],
2422  					  tipc_nl_link_policy, info->extack);
2423  	if (err)
2424  		return err;
2425  
2426  	if (!attrs[TIPC_NLA_LINK_NAME])
2427  		return -EINVAL;
2428  
2429  	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2430  
2431  	if (strcmp(name, tipc_bclink_name) == 0)
2432  		return tipc_nl_bc_link_set(net, attrs);
2433  
2434  	node = tipc_node_find_by_name(net, name, &bearer_id);
2435  	if (!node)
2436  		return -EINVAL;
2437  
2438  	tipc_node_read_lock(node);
2439  
2440  	link = node->links[bearer_id].link;
2441  	if (!link) {
2442  		res = -EINVAL;
2443  		goto out;
2444  	}
2445  
2446  	if (attrs[TIPC_NLA_LINK_PROP]) {
2447  		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
2448  
2449  		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
2450  		if (err) {
2451  			res = err;
2452  			goto out;
2453  		}
2454  
2455  		if (props[TIPC_NLA_PROP_TOL]) {
2456  			u32 tol;
2457  
2458  			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2459  			tipc_link_set_tolerance(link, tol, &xmitq);
2460  		}
2461  		if (props[TIPC_NLA_PROP_PRIO]) {
2462  			u32 prio;
2463  
2464  			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2465  			tipc_link_set_prio(link, prio, &xmitq);
2466  		}
2467  		if (props[TIPC_NLA_PROP_WIN]) {
2468  			u32 max_win;
2469  
2470  			max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2471  			tipc_link_set_queue_limits(link,
2472  						   tipc_link_min_win(link),
2473  						   max_win);
2474  		}
2475  	}
2476  
2477  out:
2478  	tipc_node_read_unlock(node);
2479  	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
2480  			 NULL);
2481  	return res;
2482  }
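/*
 * Userspace sketch: the tolerance/priority/window properties handled above
 * map onto iproute2 tipc commands of roughly this shape (assumed syntax):
 *
 *	tipc link set tolerance 1500 link <linkname>
 *	tipc link set priority 20 link <linkname>
 *	tipc link set window 50 link <linkname>
 *
 * Each request lands here as a TIPC_NLA_LINK_PROP nest with the
 * corresponding TIPC_NLA_PROP_* attribute set.
 */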
2483  
2484  int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
2485  {
2486  	struct net *net = genl_info_net(info);
2487  	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2488  	struct tipc_nl_msg msg;
2489  	char *name;
2490  	int err;
2491  
2492  	msg.portid = info->snd_portid;
2493  	msg.seq = info->snd_seq;
2494  
2495  	if (!info->attrs[TIPC_NLA_LINK])
2496  		return -EINVAL;
2497  
2498  	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2499  					  info->attrs[TIPC_NLA_LINK],
2500  					  tipc_nl_link_policy, info->extack);
2501  	if (err)
2502  		return err;
2503  
2504  	if (!attrs[TIPC_NLA_LINK_NAME])
2505  		return -EINVAL;
2506  
2507  	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2508  
2509  	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2510  	if (!msg.skb)
2511  		return -ENOMEM;
2512  
2513  	if (strcmp(name, tipc_bclink_name) == 0) {
2514  		err = tipc_nl_add_bc_link(net, &msg, tipc_net(net)->bcl);
2515  		if (err)
2516  			goto err_free;
2517  	} else {
2518  		int bearer_id;
2519  		struct tipc_node *node;
2520  		struct tipc_link *link;
2521  
2522  		node = tipc_node_find_by_name(net, name, &bearer_id);
2523  		if (!node) {
2524  			err = -EINVAL;
2525  			goto err_free;
2526  		}
2527  
2528  		tipc_node_read_lock(node);
2529  		link = node->links[bearer_id].link;
2530  		if (!link) {
2531  			tipc_node_read_unlock(node);
2532  			err = -EINVAL;
2533  			goto err_free;
2534  		}
2535  
2536  		err = __tipc_nl_add_link(net, &msg, link, 0);
2537  		tipc_node_read_unlock(node);
2538  		if (err)
2539  			goto err_free;
2540  	}
2541  
2542  	return genlmsg_reply(msg.skb, info);
2543  
2544  err_free:
2545  	nlmsg_free(msg.skb);
2546  	return err;
2547  }
2548  
2549  int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
2550  {
2551  	int err;
2552  	char *link_name;
2553  	unsigned int bearer_id;
2554  	struct tipc_link *link;
2555  	struct tipc_node *node;
2556  	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2557  	struct net *net = sock_net(skb->sk);
2558  	struct tipc_net *tn = tipc_net(net);
2559  	struct tipc_link_entry *le;
2560  
2561  	if (!info->attrs[TIPC_NLA_LINK])
2562  		return -EINVAL;
2563  
2564  	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2565  					  info->attrs[TIPC_NLA_LINK],
2566  					  tipc_nl_link_policy, info->extack);
2567  	if (err)
2568  		return err;
2569  
2570  	if (!attrs[TIPC_NLA_LINK_NAME])
2571  		return -EINVAL;
2572  
2573  	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2574  
2575  	err = -EINVAL;
2576  	if (!strcmp(link_name, tipc_bclink_name)) {
2577  		err = tipc_bclink_reset_stats(net, tipc_bc_sndlink(net));
2578  		if (err)
2579  			return err;
2580  		return 0;
2581  	} else if (strstr(link_name, tipc_bclink_name)) {
2582  		rcu_read_lock();
2583  		list_for_each_entry_rcu(node, &tn->node_list, list) {
2584  			tipc_node_read_lock(node);
2585  			link = node->bc_entry.link;
2586  			if (link && !strcmp(link_name, tipc_link_name(link))) {
2587  				err = tipc_bclink_reset_stats(net, link);
2588  				tipc_node_read_unlock(node);
2589  				break;
2590  			}
2591  			tipc_node_read_unlock(node);
2592  		}
2593  		rcu_read_unlock();
2594  		return err;
2595  	}
2596  
2597  	node = tipc_node_find_by_name(net, link_name, &bearer_id);
2598  	if (!node)
2599  		return -EINVAL;
2600  
2601  	le = &node->links[bearer_id];
2602  	tipc_node_read_lock(node);
2603  	spin_lock_bh(&le->lock);
2604  	link = node->links[bearer_id].link;
2605  	if (!link) {
2606  		spin_unlock_bh(&le->lock);
2607  		tipc_node_read_unlock(node);
2608  		return -EINVAL;
2609  	}
2610  	tipc_link_reset_stats(link);
2611  	spin_unlock_bh(&le->lock);
2612  	tipc_node_read_unlock(node);
2613  	return 0;
2614  }
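/*
 * Userspace sketch: statistics reset is typically triggered with something
 * like "tipc link stat reset link <linkname>" (assumed iproute2 syntax).
 * Passing the broadcast link name resets the broadcast send link instead,
 * and a name merely containing it matches per-peer broadcast rcv links,
 * mirroring the strcmp()/strstr() branches above.
 */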
2615  
2616  /* Caller should hold node lock */
2617  static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
2618  				    struct tipc_node *node, u32 *prev_link,
2619  				    bool bc_link)
2620  {
2621  	u32 i;
2622  	int err;
2623  
2624  	for (i = *prev_link; i < MAX_BEARERS; i++) {
2625  		*prev_link = i;
2626  
2627  		if (!node->links[i].link)
2628  			continue;
2629  
2630  		err = __tipc_nl_add_link(net, msg,
2631  					 node->links[i].link, NLM_F_MULTI);
2632  		if (err)
2633  			return err;
2634  	}
2635  
2636  	if (bc_link) {
2637  		*prev_link = i;
2638  		err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link);
2639  		if (err)
2640  			return err;
2641  	}
2642  
2643  	*prev_link = 0;
2644  
2645  	return 0;
2646  }
2647  
2648  int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
2649  {
2650  	struct net *net = sock_net(skb->sk);
2651  	struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
2652  	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
2653  	struct tipc_net *tn = net_generic(net, tipc_net_id);
2654  	struct tipc_node *node;
2655  	struct tipc_nl_msg msg;
2656  	u32 prev_node = cb->args[0];
2657  	u32 prev_link = cb->args[1];
2658  	int done = cb->args[2];
2659  	bool bc_link = cb->args[3];
2660  	int err;
2661  
2662  	if (done)
2663  		return 0;
2664  
2665  	if (!prev_node) {
2666  		/* Check whether broadcast-receiver links must be dumped too */
2667  		if (attrs && attrs[TIPC_NLA_LINK]) {
2668  			err = nla_parse_nested_deprecated(link,
2669  							  TIPC_NLA_LINK_MAX,
2670  							  attrs[TIPC_NLA_LINK],
2671  							  tipc_nl_link_policy,
2672  							  NULL);
2673  			if (unlikely(err))
2674  				return err;
2675  			if (unlikely(!link[TIPC_NLA_LINK_BROADCAST]))
2676  				return -EINVAL;
2677  			bc_link = true;
2678  		}
2679  	}
2680  
2681  	msg.skb = skb;
2682  	msg.portid = NETLINK_CB(cb->skb).portid;
2683  	msg.seq = cb->nlh->nlmsg_seq;
2684  
2685  	rcu_read_lock();
2686  	if (prev_node) {
2687  		node = tipc_node_find(net, prev_node);
2688  		if (!node) {
2689  		/* We never set seq or call nl_dump_check_consistent(),
2690  		 * which means that setting prev_seq here will cause the
2691  		 * consistency check to fail in the netlink callback
2692  		 * handler, resulting in the last NLMSG_DONE message
2693  		 * having the NLM_F_DUMP_INTR flag set.
2694  			 */
2695  			cb->prev_seq = 1;
2696  			goto out;
2697  		}
2698  		tipc_node_put(node);
2699  
2700  		list_for_each_entry_continue_rcu(node, &tn->node_list,
2701  						 list) {
2702  			tipc_node_read_lock(node);
2703  			err = __tipc_nl_add_node_links(net, &msg, node,
2704  						       &prev_link, bc_link);
2705  			tipc_node_read_unlock(node);
2706  			if (err)
2707  				goto out;
2708  
2709  			prev_node = node->addr;
2710  		}
2711  	} else {
2712  		err = tipc_nl_add_bc_link(net, &msg, tn->bcl);
2713  		if (err)
2714  			goto out;
2715  
2716  		list_for_each_entry_rcu(node, &tn->node_list, list) {
2717  			tipc_node_read_lock(node);
2718  			err = __tipc_nl_add_node_links(net, &msg, node,
2719  						       &prev_link, bc_link);
2720  			tipc_node_read_unlock(node);
2721  			if (err)
2722  				goto out;
2723  
2724  			prev_node = node->addr;
2725  		}
2726  	}
2727  	done = 1;
2728  out:
2729  	rcu_read_unlock();
2730  
2731  	cb->args[0] = prev_node;
2732  	cb->args[1] = prev_link;
2733  	cb->args[2] = done;
2734  	cb->args[3] = bc_link;
2735  
2736  	return skb->len;
2737  }
2738  
2739  int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
2740  {
2741  	struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
2742  	struct net *net = sock_net(skb->sk);
2743  	int err;
2744  
2745  	if (!info->attrs[TIPC_NLA_MON])
2746  		return -EINVAL;
2747  
2748  	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
2749  					  info->attrs[TIPC_NLA_MON],
2750  					  tipc_nl_monitor_policy,
2751  					  info->extack);
2752  	if (err)
2753  		return err;
2754  
2755  	if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
2756  		u32 val;
2757  
2758  		val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
2759  		err = tipc_nl_monitor_set_threshold(net, val);
2760  		if (err)
2761  			return err;
2762  	}
2763  
2764  	return 0;
2765  }
2766  
2767  static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
2768  {
2769  	struct nlattr *attrs;
2770  	void *hdr;
2771  	u32 val;
2772  
2773  	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2774  			  0, TIPC_NL_MON_GET);
2775  	if (!hdr)
2776  		return -EMSGSIZE;
2777  
2778  	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
2779  	if (!attrs)
2780  		goto msg_full;
2781  
2782  	val = tipc_nl_monitor_get_threshold(net);
2783  
2784  	if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
2785  		goto attr_msg_full;
2786  
2787  	nla_nest_end(msg->skb, attrs);
2788  	genlmsg_end(msg->skb, hdr);
2789  
2790  	return 0;
2791  
2792  attr_msg_full:
2793  	nla_nest_cancel(msg->skb, attrs);
2794  msg_full:
2795  	genlmsg_cancel(msg->skb, hdr);
2796  
2797  	return -EMSGSIZE;
2798  }
2799  
2800  int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
2801  {
2802  	struct net *net = sock_net(skb->sk);
2803  	struct tipc_nl_msg msg;
2804  	int err;
2805  
2806  	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2807  	if (!msg.skb)
2808  		return -ENOMEM;
2809  	msg.portid = info->snd_portid;
2810  	msg.seq = info->snd_seq;
2811  
2812  	err = __tipc_nl_add_monitor_prop(net, &msg);
2813  	if (err) {
2814  		nlmsg_free(msg.skb);
2815  		return err;
2816  	}
2817  
2818  	return genlmsg_reply(msg.skb, info);
2819  }
2820  
2821  int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
2822  {
2823  	struct net *net = sock_net(skb->sk);
2824  	u32 prev_bearer = cb->args[0];
2825  	struct tipc_nl_msg msg;
2826  	int bearer_id;
2827  	int err;
2828  
2829  	if (prev_bearer == MAX_BEARERS)
2830  		return 0;
2831  
2832  	msg.skb = skb;
2833  	msg.portid = NETLINK_CB(cb->skb).portid;
2834  	msg.seq = cb->nlh->nlmsg_seq;
2835  
2836  	rtnl_lock();
2837  	for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
2838  		err = __tipc_nl_add_monitor(net, &msg, bearer_id);
2839  		if (err)
2840  			break;
2841  	}
2842  	rtnl_unlock();
2843  	cb->args[0] = bearer_id;
2844  
2845  	return skb->len;
2846  }
2847  
2848  int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
2849  				   struct netlink_callback *cb)
2850  {
2851  	struct net *net = sock_net(skb->sk);
2852  	u32 prev_node = cb->args[1];
2853  	u32 bearer_id = cb->args[2];
2854  	int done = cb->args[0];
2855  	struct tipc_nl_msg msg;
2856  	int err;
2857  
2858  	if (!prev_node) {
2859  		struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
2860  		struct nlattr *mon[TIPC_NLA_MON_MAX + 1];
2861  
2862  		if (!attrs[TIPC_NLA_MON])
2863  			return -EINVAL;
2864  
2865  		err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
2866  						  attrs[TIPC_NLA_MON],
2867  						  tipc_nl_monitor_policy,
2868  						  NULL);
2869  		if (err)
2870  			return err;
2871  
2872  		if (!mon[TIPC_NLA_MON_REF])
2873  			return -EINVAL;
2874  
2875  		bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);
2876  
2877  		if (bearer_id >= MAX_BEARERS)
2878  			return -EINVAL;
2879  	}
2880  
2881  	if (done)
2882  		return 0;
2883  
2884  	msg.skb = skb;
2885  	msg.portid = NETLINK_CB(cb->skb).portid;
2886  	msg.seq = cb->nlh->nlmsg_seq;
2887  
2888  	rtnl_lock();
2889  	err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
2890  	if (!err)
2891  		done = 1;
2892  
2893  	rtnl_unlock();
2894  	cb->args[0] = done;
2895  	cb->args[1] = prev_node;
2896  	cb->args[2] = bearer_id;
2897  
2898  	return skb->len;
2899  }
2900  
2901  #ifdef CONFIG_TIPC_CRYPTO
2902  static int tipc_nl_retrieve_key(struct nlattr **attrs,
2903  				struct tipc_aead_key **pkey)
2904  {
2905  	struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
2906  	struct tipc_aead_key *key;
2907  
2908  	if (!attr)
2909  		return -ENODATA;
2910  
2911  	if (nla_len(attr) < sizeof(*key))
2912  		return -EINVAL;
2913  	key = (struct tipc_aead_key *)nla_data(attr);
2914  	if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
2915  	    nla_len(attr) < tipc_aead_key_size(key))
2916  		return -EINVAL;
2917  
2918  	*pkey = key;
2919  	return 0;
2920  }
2921  
2922  static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
2923  {
2924  	struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];
2925  
2926  	if (!attr)
2927  		return -ENODATA;
2928  
2929  	if (nla_len(attr) < TIPC_NODEID_LEN)
2930  		return -EINVAL;
2931  
2932  	*node_id = (u8 *)nla_data(attr);
2933  	return 0;
2934  }
2935  
2936  static int tipc_nl_retrieve_rekeying(struct nlattr **attrs, u32 *intv)
2937  {
2938  	struct nlattr *attr = attrs[TIPC_NLA_NODE_REKEYING];
2939  
2940  	if (!attr)
2941  		return -ENODATA;
2942  
2943  	*intv = nla_get_u32(attr);
2944  	return 0;
2945  }
2946  
2947  static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
2948  {
2949  	struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
2950  	struct net *net = sock_net(skb->sk);
2951  	struct tipc_crypto *tx = tipc_net(net)->crypto_tx, *c = tx;
2952  	struct tipc_node *n = NULL;
2953  	struct tipc_aead_key *ukey;
2954  	bool rekeying = true, master_key = false;
2955  	u8 *id, *own_id, mode;
2956  	u32 intv = 0;
2957  	int rc = 0;
2958  
2959  	if (!info->attrs[TIPC_NLA_NODE])
2960  		return -EINVAL;
2961  
2962  	rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
2963  			      info->attrs[TIPC_NLA_NODE],
2964  			      tipc_nl_node_policy, info->extack);
2965  	if (rc)
2966  		return rc;
2967  
2968  	own_id = tipc_own_id(net);
2969  	if (!own_id) {
2970  		GENL_SET_ERR_MSG(info, "own node identity not found (set id first?)");
2971  		return -EPERM;
2972  	}
2973  
2974  	rc = tipc_nl_retrieve_rekeying(attrs, &intv);
2975  	if (rc == -ENODATA)
2976  		rekeying = false;
2977  
2978  	rc = tipc_nl_retrieve_key(attrs, &ukey);
2979  	if (rc == -ENODATA && rekeying)
2980  		goto rekeying;
2981  	else if (rc)
2982  		return rc;
2983  
2984  	rc = tipc_aead_key_validate(ukey, info);
2985  	if (rc)
2986  		return rc;
2987  
2988  	rc = tipc_nl_retrieve_nodeid(attrs, &id);
2989  	switch (rc) {
2990  	case -ENODATA:
2991  		mode = CLUSTER_KEY;
2992  		master_key = !!(attrs[TIPC_NLA_NODE_KEY_MASTER]);
2993  		break;
2994  	case 0:
2995  		mode = PER_NODE_KEY;
2996  		if (memcmp(id, own_id, NODE_ID_LEN)) {
2997  			n = tipc_node_find_by_id(net, id) ?:
2998  				tipc_node_create(net, 0, id, 0xffffu, 0, true);
2999  			if (unlikely(!n))
3000  				return -ENOMEM;
3001  			c = n->crypto_rx;
3002  		}
3003  		break;
3004  	default:
3005  		return rc;
3006  	}
3007  
3008  	/* Initiate the TX/RX key */
3009  	rc = tipc_crypto_key_init(c, ukey, mode, master_key);
3010  	if (n)
3011  		tipc_node_put(n);
3012  
3013  	if (unlikely(rc < 0)) {
3014  		GENL_SET_ERR_MSG(info, "unable to initiate or attach new key");
3015  		return rc;
3016  	} else if (c == tx) {
3017  		/* Distribute TX key but not master one */
3018  		if (!master_key && tipc_crypto_key_distr(tx, rc, NULL))
3019  			GENL_SET_ERR_MSG(info, "failed to replicate new key");
3020  rekeying:
3021  		/* Schedule TX rekeying if needed */
3022  		tipc_crypto_rekeying_sched(tx, rekeying, intv);
3023  	}
3024  
3025  	return 0;
3026  }
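/*
 * Userspace sketch: key management is usually driven via the iproute2 tipc
 * tool, e.g. (assumed syntax) "tipc node set key KEY [algname ALGNAME]
 * [nodeid NODEID | master]" to attach a key, and "tipc node set key
 * rekeying INTERVAL" to schedule TX rekeying; both end up in this handler
 * through the TIPC_NLA_NODE_* attributes parsed above.
 */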
3027  
3028  int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
3029  {
3030  	int err;
3031  
3032  	rtnl_lock();
3033  	err = __tipc_nl_node_set_key(skb, info);
3034  	rtnl_unlock();
3035  
3036  	return err;
3037  }
3038  
3039  static int __tipc_nl_node_flush_key(struct sk_buff *skb,
3040  				    struct genl_info *info)
3041  {
3042  	struct net *net = sock_net(skb->sk);
3043  	struct tipc_net *tn = tipc_net(net);
3044  	struct tipc_node *n;
3045  
3046  	tipc_crypto_key_flush(tn->crypto_tx);
3047  	rcu_read_lock();
3048  	list_for_each_entry_rcu(n, &tn->node_list, list)
3049  		tipc_crypto_key_flush(n->crypto_rx);
3050  	rcu_read_unlock();
3051  
3052  	return 0;
3053  }
3054  
3055  int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
3056  {
3057  	int err;
3058  
3059  	rtnl_lock();
3060  	err = __tipc_nl_node_flush_key(skb, info);
3061  	rtnl_unlock();
3062  
3063  	return err;
3064  }
3065  #endif
3066  
3067  /**
3068   * tipc_node_dump - dump TIPC node data
3069   * @n: tipc node to be dumped
3070   * @more: dump more?
3071   *        - false: dump only tipc node data
3072   *        - true: dump node link data as well
3073   * @buf: buffer to which the formatted dump data is written
3074   */
3075  int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
3076  {
3077  	int i = 0;
3078  	size_t sz = (more) ? NODE_LMAX : NODE_LMIN;
3079  
3080  	if (!n) {
3081  		i += scnprintf(buf, sz, "node data: (null)\n");
3082  		return i;
3083  	}
3084  
3085  	i += scnprintf(buf, sz, "node data: %x", n->addr);
3086  	i += scnprintf(buf + i, sz - i, " %x", n->state);
3087  	i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
3088  	i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
3089  	i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
3090  	i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
3091  	i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
3092  	i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
3093  	i += scnprintf(buf + i, sz - i, " %u", n->working_links);
3094  	i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
3095  	i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);
3096  
3097  	if (!more)
3098  		return i;
3099  
3100  	i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
3101  	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
3102  	i += scnprintf(buf + i, sz - i, " media: ");
3103  	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
3104  	i += scnprintf(buf + i, sz - i, "\n");
3105  	i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
3106  	i += scnprintf(buf + i, sz - i, " inputq: ");
3107  	i += tipc_list_dump(&n->links[0].inputq, false, buf + i);
3108  
3109  	i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
3110  	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
3111  	i += scnprintf(buf + i, sz - i, " media: ");
3112  	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
3113  	i += scnprintf(buf + i, sz - i, "\n");
3114  	i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
3115  	i += scnprintf(buf + i, sz - i, " inputq: ");
3116  	i += tipc_list_dump(&n->links[1].inputq, false, buf + i);
3117  
3118  	i += scnprintf(buf + i, sz - i, "bclink:\n ");
3119  	i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);
3120  
3121  	return i;
3122  }
3123  
3124  void tipc_node_pre_cleanup_net(struct net *exit_net)
3125  {
3126  	struct tipc_node *n;
3127  	struct tipc_net *tn;
3128  	struct net *tmp;
3129  
3130  	rcu_read_lock();
3131  	for_each_net_rcu(tmp) {
3132  		if (tmp == exit_net)
3133  			continue;
3134  		tn = tipc_net(tmp);
3135  		if (!tn)
3136  			continue;
3137  		spin_lock_bh(&tn->node_list_lock);
3138  		list_for_each_entry_rcu(n, &tn->node_list, list) {
3139  			if (!n->peer_net)
3140  				continue;
3141  			if (n->peer_net != exit_net)
3142  				continue;
3143  			tipc_node_write_lock(n);
3144  			n->peer_net = NULL;
3145  			n->peer_hash_mix = 0;
3146  			tipc_node_write_unlock_fast(n);
3147  			break;
3148  		}
3149  		spin_unlock_bh(&tn->node_list_lock);
3150  	}
3151  	rcu_read_unlock();
3152  }
3153