xref: /linux/net/tipc/link.c (revision 0883c2c06fb5bcf5b9e008270827e63c09a88c1e)
1 /*
2  * net/tipc/link.c: TIPC link code
3  *
4  * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
5  * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include "core.h"
38 #include "subscr.h"
39 #include "link.h"
40 #include "bcast.h"
41 #include "socket.h"
42 #include "name_distr.h"
43 #include "discover.h"
44 #include "netlink.h"
45 
46 #include <linux/pkt_sched.h>
47 
48 struct tipc_stats {
49 	u32 sent_info;		/* used in counting # sent packets */
50 	u32 recv_info;		/* used in counting # recv'd packets */
51 	u32 sent_states;
52 	u32 recv_states;
53 	u32 sent_probes;
54 	u32 recv_probes;
55 	u32 sent_nacks;
56 	u32 recv_nacks;
57 	u32 sent_acks;
58 	u32 sent_bundled;
59 	u32 sent_bundles;
60 	u32 recv_bundled;
61 	u32 recv_bundles;
62 	u32 retransmitted;
63 	u32 sent_fragmented;
64 	u32 sent_fragments;
65 	u32 recv_fragmented;
66 	u32 recv_fragments;
67 	u32 link_congs;		/* # port sends blocked by congestion */
68 	u32 deferred_recv;
69 	u32 duplicates;
70 	u32 max_queue_sz;	/* send queue size high water mark */
71 	u32 accu_queue_sz;	/* used for send queue size profiling */
72 	u32 queue_sz_counts;	/* used for send queue size profiling */
73 	u32 msg_length_counts;	/* used for message length profiling */
74 	u32 msg_lengths_total;	/* used for message length profiling */
75 	u32 msg_length_profile[7]; /* used for msg. length profiling */
76 };
77 
78 /**
79  * struct tipc_link - TIPC link data structure
80  * @addr: network address of link's peer node
81  * @name: link name character string
82  * @media_addr: media address to use when sending messages over link
83  * @timer: link timer
84  * @net: pointer to namespace struct
85  * @refcnt: reference counter for permanent references (owner node & timer)
86  * @peer_session: link session # being used by peer end of link
87  * @peer_bearer_id: bearer id used by link's peer endpoint
88  * @bearer_id: local bearer id used by link
89  * @tolerance: minimum link continuity loss needed to reset link [in ms]
90  * @keepalive_intv: link keepalive timer interval
91  * @abort_limit: # of unacknowledged continuity probes needed to reset link
92  * @state: current state of link FSM
93  * @peer_caps: bitmap describing capabilities of peer node
94  * @silent_intv_cnt: # of timer intervals without any reception from peer
95  * @proto_msg: template for control messages generated by link
96  * @pmsg: convenience pointer to "proto_msg" field
97  * @priority: current link priority
98  * @net_plane: current link network plane ('A' through 'H')
99  * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
100  * @exp_msg_count: # of tunnelled messages expected during link changeover
101  * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
102  * @mtu: current maximum packet size for this link
103  * @advertised_mtu: advertised own mtu when link is being established
104  * @transmq: queue for sent, non-acked messages
105  * @backlogq: queue for messages waiting to be sent
106  * @snd_nxt: next sequence number to use for outbound messages
107  * @last_retransm: sequence number of most recently retransmitted message
108  * @stale_count: # of identical retransmit requests made by peer
109  * @ackers: # of peers that need to ack each packet before it can be released
110  * @acked: seqno of last packet acked by a certain peer. Used for broadcast.
111  * @rcv_nxt: next sequence number to expect for inbound messages
112  * @deferdq: deferred queue of saved OOS b'cast messages received from node
113  * @rcv_unacked: # of inbound messages rx'd without ack'ing back to peer
114  * @inputq: buffer queue for messages to be delivered upwards
115  * @namedq: buffer queue for name table messages to be delivered upwards
116  * @next_out: ptr to first unsent outbound message in queue
117  * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
118  * @long_msg_seq_no: next identifier to use for outbound fragmented messages
119  * @reasm_buf: head of partially reassembled inbound message fragments
120  * @bc_rcvr: marks that this is a broadcast receiver link
121  * @stats: collects statistics regarding link activity
122  */
123 struct tipc_link {
124 	u32 addr;
125 	char name[TIPC_MAX_LINK_NAME];
126 	struct net *net;
127 
128 	/* Management and link supervision data */
129 	u32 peer_session;
130 	u32 session;
131 	u32 peer_bearer_id;
132 	u32 bearer_id;
133 	u32 tolerance;
134 	unsigned long keepalive_intv;
135 	u32 abort_limit;
136 	u32 state;
137 	u16 peer_caps;
138 	bool active;
139 	u32 silent_intv_cnt;
140 	char if_name[TIPC_MAX_IF_NAME];
141 	u32 priority;
142 	char net_plane;
143 	u16 rst_cnt;
144 
145 	/* Failover/synch */
146 	u16 drop_point;
147 	struct sk_buff *failover_reasm_skb;
148 
149 	/* Max packet negotiation */
150 	u16 mtu;
151 	u16 advertised_mtu;
152 
153 	/* Sending */
154 	struct sk_buff_head transmq;
155 	struct sk_buff_head backlogq;
156 	struct {
157 		u16 len;
158 		u16 limit;
159 	} backlog[5];
160 	u16 snd_nxt;
161 	u16 last_retransm;
162 	u16 window;
163 	u32 stale_count;
164 
165 	/* Reception */
166 	u16 rcv_nxt;
167 	u32 rcv_unacked;
168 	struct sk_buff_head deferdq;
169 	struct sk_buff_head *inputq;
170 	struct sk_buff_head *namedq;
171 
172 	/* Congestion handling */
173 	struct sk_buff_head wakeupq;
174 
175 	/* Fragmentation/reassembly */
176 	struct sk_buff *reasm_buf;
177 
178 	/* Broadcast */
179 	u16 ackers;
180 	u16 acked;
181 	struct tipc_link *bc_rcvlink;
182 	struct tipc_link *bc_sndlink;
183 	int nack_state;
184 	bool bc_peer_is_up;
185 
186 	/* Statistics */
187 	struct tipc_stats stats;
188 };
189 
190 /*
191  * Error message prefixes
192  */
193 static const char *link_co_err = "Link tunneling error, ";
194 static const char *link_rst_msg = "Resetting link ";
195 
196 /* Send states for broadcast NACKs
197  */
198 enum {
199 	BC_NACK_SND_CONDITIONAL,
200 	BC_NACK_SND_UNCONDITIONAL,
201 	BC_NACK_SND_SUPPRESS,
202 };
203 
204 /*
205  * Interval between NACKs when packets arrive out of order
206  */
207 #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
208 
209 /* Wildcard value for link session numbers. When it is known that
210  * peer endpoint is down, any session number must be accepted.
211  */
212 #define ANY_SESSION 0x10000
213 
214 /* Link FSM states:
215  */
216 enum {
217 	LINK_ESTABLISHED     = 0xe,
218 	LINK_ESTABLISHING    = 0xe  << 4,
219 	LINK_RESET           = 0x1  << 8,
220 	LINK_RESETTING       = 0x2  << 12,
221 	LINK_PEER_RESET      = 0xd  << 16,
222 	LINK_FAILINGOVER     = 0xf  << 20,
223 	LINK_SYNCHING        = 0xc  << 24
224 };
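
/* Note: each state value above occupies its own bit positions, so the FSM
 * query helpers below (e.g. link_is_up(), tipc_link_is_blocked()) can test
 * membership in several states at once with a single bitwise AND against an
 * OR'd mask of states.
 */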
225 
226 /* Link FSM state checking routines
227  */
228 static int link_is_up(struct tipc_link *l)
229 {
230 	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
231 }
232 
233 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
234 			       struct sk_buff_head *xmitq);
235 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
236 				      u16 rcvgap, int tolerance, int priority,
237 				      struct sk_buff_head *xmitq);
238 static void link_print(struct tipc_link *l, const char *str);
239 static void tipc_link_build_nack_msg(struct tipc_link *l,
240 				     struct sk_buff_head *xmitq);
241 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
242 					struct sk_buff_head *xmitq);
243 static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
244 
245 /*
246  *  Simple non-static link routines (i.e. referenced outside this file)
247  */
248 bool tipc_link_is_up(struct tipc_link *l)
249 {
250 	return link_is_up(l);
251 }
252 
253 bool tipc_link_peer_is_down(struct tipc_link *l)
254 {
255 	return l->state == LINK_PEER_RESET;
256 }
257 
258 bool tipc_link_is_reset(struct tipc_link *l)
259 {
260 	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
261 }
262 
263 bool tipc_link_is_establishing(struct tipc_link *l)
264 {
265 	return l->state == LINK_ESTABLISHING;
266 }
267 
268 bool tipc_link_is_synching(struct tipc_link *l)
269 {
270 	return l->state == LINK_SYNCHING;
271 }
272 
273 bool tipc_link_is_failingover(struct tipc_link *l)
274 {
275 	return l->state == LINK_FAILINGOVER;
276 }
277 
278 bool tipc_link_is_blocked(struct tipc_link *l)
279 {
280 	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
281 }
282 
283 static bool link_is_bc_sndlink(struct tipc_link *l)
284 {
285 	return !l->bc_sndlink;
286 }
287 
288 static bool link_is_bc_rcvlink(struct tipc_link *l)
289 {
290 	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
291 }
292 
293 int tipc_link_is_active(struct tipc_link *l)
294 {
295 	return l->active;
296 }
297 
298 void tipc_link_set_active(struct tipc_link *l, bool active)
299 {
300 	l->active = active;
301 }
302 
303 u32 tipc_link_id(struct tipc_link *l)
304 {
305 	return l->peer_bearer_id << 16 | l->bearer_id;
306 }
307 
308 int tipc_link_window(struct tipc_link *l)
309 {
310 	return l->window;
311 }
312 
313 int tipc_link_prio(struct tipc_link *l)
314 {
315 	return l->priority;
316 }
317 
318 unsigned long tipc_link_tolerance(struct tipc_link *l)
319 {
320 	return l->tolerance;
321 }
322 
323 struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
324 {
325 	return l->inputq;
326 }
327 
328 char tipc_link_plane(struct tipc_link *l)
329 {
330 	return l->net_plane;
331 }
332 
333 void tipc_link_add_bc_peer(struct tipc_link *snd_l,
334 			   struct tipc_link *uc_l,
335 			   struct sk_buff_head *xmitq)
336 {
337 	struct tipc_link *rcv_l = uc_l->bc_rcvlink;
338 
339 	snd_l->ackers++;
340 	rcv_l->acked = snd_l->snd_nxt - 1;
341 	snd_l->state = LINK_ESTABLISHED;
342 	tipc_link_build_bc_init_msg(uc_l, xmitq);
343 }
344 
345 void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
346 			      struct tipc_link *rcv_l,
347 			      struct sk_buff_head *xmitq)
348 {
349 	u16 ack = snd_l->snd_nxt - 1;
350 
351 	snd_l->ackers--;
352 	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
353 	tipc_link_reset(rcv_l);
354 	rcv_l->state = LINK_RESET;
355 	if (!snd_l->ackers) {
356 		tipc_link_reset(snd_l);
357 		snd_l->state = LINK_RESET;
358 		__skb_queue_purge(xmitq);
359 	}
360 }
361 
362 int tipc_link_bc_peers(struct tipc_link *l)
363 {
364 	return l->ackers;
365 }
366 
367 void tipc_link_set_mtu(struct tipc_link *l, int mtu)
368 {
369 	l->mtu = mtu;
370 }
371 
372 int tipc_link_mtu(struct tipc_link *l)
373 {
374 	return l->mtu;
375 }
376 
377 u16 tipc_link_rcv_nxt(struct tipc_link *l)
378 {
379 	return l->rcv_nxt;
380 }
381 
382 u16 tipc_link_acked(struct tipc_link *l)
383 {
384 	return l->acked;
385 }
386 
387 char *tipc_link_name(struct tipc_link *l)
388 {
389 	return l->name;
390 }
391 
392 /**
393  * tipc_link_create - create a new link
394  * @net: pointer to associated network namespace
395  * @if_name: associated interface name
396  * @bearer_id: id (index) of associated bearer
397  * @tolerance: link tolerance to be used by link
398  * @net_plane: network plane (A,B,C..) this link belongs to
399  * @mtu: mtu to be advertised by link
400  * @priority: priority to be used by link
401  * @window: send window to be used by link
402  * @session: session to be used by link
403  * @ownnode: identity of own node
404  * @peer: node id of peer node
405  * @peer_caps: bitmap describing peer node capabilities
406  * @bc_sndlink: the namespace global link used for broadcast sending
407  * @bc_rcvlink: the peer specific link used for broadcast reception
408  * @inputq: queue to put messages ready for delivery
409  * @namedq: queue to put binding table update messages ready for delivery
410  * @link: return value, pointer to put the created link
411  *
412  * Returns true if link was created, otherwise false
413  */
414 bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
415 		      int tolerance, char net_plane, u32 mtu, int priority,
416 		      int window, u32 session, u32 ownnode, u32 peer,
417 		      u16 peer_caps,
418 		      struct tipc_link *bc_sndlink,
419 		      struct tipc_link *bc_rcvlink,
420 		      struct sk_buff_head *inputq,
421 		      struct sk_buff_head *namedq,
422 		      struct tipc_link **link)
423 {
424 	struct tipc_link *l;
425 
426 	l = kzalloc(sizeof(*l), GFP_ATOMIC);
427 	if (!l)
428 		return false;
429 	*link = l;
430 	l->session = session;
431 
432 	/* Note: peer i/f name is completed by reset/activate message */
433 	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
434 		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
435 		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
436 	strcpy(l->if_name, if_name);
437 	l->addr = peer;
438 	l->peer_caps = peer_caps;
439 	l->net = net;
440 	l->peer_session = ANY_SESSION;
441 	l->bearer_id = bearer_id;
442 	l->tolerance = tolerance;
443 	l->net_plane = net_plane;
444 	l->advertised_mtu = mtu;
445 	l->mtu = mtu;
446 	l->priority = priority;
447 	tipc_link_set_queue_limits(l, window);
448 	l->ackers = 1;
449 	l->bc_sndlink = bc_sndlink;
450 	l->bc_rcvlink = bc_rcvlink;
451 	l->inputq = inputq;
452 	l->namedq = namedq;
453 	l->state = LINK_RESETTING;
454 	__skb_queue_head_init(&l->transmq);
455 	__skb_queue_head_init(&l->backlogq);
456 	__skb_queue_head_init(&l->deferdq);
457 	skb_queue_head_init(&l->wakeupq);
458 	skb_queue_head_init(l->inputq);
459 	return true;
460 }
461 
462 /**
463  * tipc_link_bc_create - create new link to be used for broadcast
464  * @net: pointer to associated network namespace
465  * @mtu: mtu to be used
466  * @window: send window to be used
467  * @inputq: queue to put messages ready for delivery
468  * @namedq: queue to put binding table update messages ready for delivery
469  * @link: return value, pointer to put the created link
470  *
471  * Returns true if link was created, otherwise false
472  */
473 bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
474 			 int mtu, int window, u16 peer_caps,
475 			 struct sk_buff_head *inputq,
476 			 struct sk_buff_head *namedq,
477 			 struct tipc_link *bc_sndlink,
478 			 struct tipc_link **link)
479 {
480 	struct tipc_link *l;
481 
482 	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
483 			      0, ownnode, peer, peer_caps, bc_sndlink,
484 			      NULL, inputq, namedq, link))
485 		return false;
486 
487 	l = *link;
488 	strcpy(l->name, tipc_bclink_name);
489 	tipc_link_reset(l);
490 	l->state = LINK_RESET;
491 	l->ackers = 0;
492 	l->bc_rcvlink = l;
493 
494 	/* Broadcast send link is always up */
495 	if (link_is_bc_sndlink(l))
496 		l->state = LINK_ESTABLISHED;
497 
498 	return true;
499 }
500 
501 /**
502  * tipc_link_fsm_evt - link finite state machine
503  * @l: pointer to link
504  * @evt: state machine event to be processed
505  */
506 int tipc_link_fsm_evt(struct tipc_link *l, int evt)
507 {
508 	int rc = 0;
509 
510 	switch (l->state) {
511 	case LINK_RESETTING:
512 		switch (evt) {
513 		case LINK_PEER_RESET_EVT:
514 			l->state = LINK_PEER_RESET;
515 			break;
516 		case LINK_RESET_EVT:
517 			l->state = LINK_RESET;
518 			break;
519 		case LINK_FAILURE_EVT:
520 		case LINK_FAILOVER_BEGIN_EVT:
521 		case LINK_ESTABLISH_EVT:
522 		case LINK_FAILOVER_END_EVT:
523 		case LINK_SYNCH_BEGIN_EVT:
524 		case LINK_SYNCH_END_EVT:
525 		default:
526 			goto illegal_evt;
527 		}
528 		break;
529 	case LINK_RESET:
530 		switch (evt) {
531 		case LINK_PEER_RESET_EVT:
532 			l->state = LINK_ESTABLISHING;
533 			break;
534 		case LINK_FAILOVER_BEGIN_EVT:
535 			l->state = LINK_FAILINGOVER;
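			/* fall through */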
536 		case LINK_FAILURE_EVT:
537 		case LINK_RESET_EVT:
538 		case LINK_ESTABLISH_EVT:
539 		case LINK_FAILOVER_END_EVT:
540 			break;
541 		case LINK_SYNCH_BEGIN_EVT:
542 		case LINK_SYNCH_END_EVT:
543 		default:
544 			goto illegal_evt;
545 		}
546 		break;
547 	case LINK_PEER_RESET:
548 		switch (evt) {
549 		case LINK_RESET_EVT:
550 			l->state = LINK_ESTABLISHING;
551 			break;
552 		case LINK_PEER_RESET_EVT:
553 		case LINK_ESTABLISH_EVT:
554 		case LINK_FAILURE_EVT:
555 			break;
556 		case LINK_SYNCH_BEGIN_EVT:
557 		case LINK_SYNCH_END_EVT:
558 		case LINK_FAILOVER_BEGIN_EVT:
559 		case LINK_FAILOVER_END_EVT:
560 		default:
561 			goto illegal_evt;
562 		}
563 		break;
564 	case LINK_FAILINGOVER:
565 		switch (evt) {
566 		case LINK_FAILOVER_END_EVT:
567 			l->state = LINK_RESET;
568 			break;
569 		case LINK_PEER_RESET_EVT:
570 		case LINK_RESET_EVT:
571 		case LINK_ESTABLISH_EVT:
572 		case LINK_FAILURE_EVT:
573 			break;
574 		case LINK_FAILOVER_BEGIN_EVT:
575 		case LINK_SYNCH_BEGIN_EVT:
576 		case LINK_SYNCH_END_EVT:
577 		default:
578 			goto illegal_evt;
579 		}
580 		break;
581 	case LINK_ESTABLISHING:
582 		switch (evt) {
583 		case LINK_ESTABLISH_EVT:
584 			l->state = LINK_ESTABLISHED;
585 			break;
586 		case LINK_FAILOVER_BEGIN_EVT:
587 			l->state = LINK_FAILINGOVER;
588 			break;
589 		case LINK_RESET_EVT:
590 			l->state = LINK_RESET;
591 			break;
592 		case LINK_FAILURE_EVT:
593 		case LINK_PEER_RESET_EVT:
594 		case LINK_SYNCH_BEGIN_EVT:
595 		case LINK_FAILOVER_END_EVT:
596 			break;
597 		case LINK_SYNCH_END_EVT:
598 		default:
599 			goto illegal_evt;
600 		}
601 		break;
602 	case LINK_ESTABLISHED:
603 		switch (evt) {
604 		case LINK_PEER_RESET_EVT:
605 			l->state = LINK_PEER_RESET;
606 			rc |= TIPC_LINK_DOWN_EVT;
607 			break;
608 		case LINK_FAILURE_EVT:
609 			l->state = LINK_RESETTING;
610 			rc |= TIPC_LINK_DOWN_EVT;
611 			break;
612 		case LINK_RESET_EVT:
613 			l->state = LINK_RESET;
614 			break;
615 		case LINK_ESTABLISH_EVT:
616 		case LINK_SYNCH_END_EVT:
617 			break;
618 		case LINK_SYNCH_BEGIN_EVT:
619 			l->state = LINK_SYNCHING;
620 			break;
621 		case LINK_FAILOVER_BEGIN_EVT:
622 		case LINK_FAILOVER_END_EVT:
623 		default:
624 			goto illegal_evt;
625 		}
626 		break;
627 	case LINK_SYNCHING:
628 		switch (evt) {
629 		case LINK_PEER_RESET_EVT:
630 			l->state = LINK_PEER_RESET;
631 			rc |= TIPC_LINK_DOWN_EVT;
632 			break;
633 		case LINK_FAILURE_EVT:
634 			l->state = LINK_RESETTING;
635 			rc |= TIPC_LINK_DOWN_EVT;
636 			break;
637 		case LINK_RESET_EVT:
638 			l->state = LINK_RESET;
639 			break;
640 		case LINK_ESTABLISH_EVT:
641 		case LINK_SYNCH_BEGIN_EVT:
642 			break;
643 		case LINK_SYNCH_END_EVT:
644 			l->state = LINK_ESTABLISHED;
645 			break;
646 		case LINK_FAILOVER_BEGIN_EVT:
647 		case LINK_FAILOVER_END_EVT:
648 		default:
649 			goto illegal_evt;
650 		}
651 		break;
652 	default:
653 		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
654 	}
655 	return rc;
656 illegal_evt:
657 	pr_err("Illegal FSM event %x in state %x on link %s\n",
658 	       evt, l->state, l->name);
659 	return rc;
660 }
661 
662 /* link_profile_stats - update statistical profiling of traffic
663  */
664 static void link_profile_stats(struct tipc_link *l)
665 {
666 	struct sk_buff *skb;
667 	struct tipc_msg *msg;
668 	int length;
669 
670 	/* Update counters used in statistical profiling of send traffic */
671 	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
672 	l->stats.queue_sz_counts++;
673 
674 	skb = skb_peek(&l->transmq);
675 	if (!skb)
676 		return;
677 	msg = buf_msg(skb);
678 	length = msg_size(msg);
679 
680 	if (msg_user(msg) == MSG_FRAGMENTER) {
681 		if (msg_type(msg) != FIRST_FRAGMENT)
682 			return;
683 		length = msg_size(msg_get_wrapped(msg));
684 	}
685 	l->stats.msg_lengths_total += length;
686 	l->stats.msg_length_counts++;
687 	if (length <= 64)
688 		l->stats.msg_length_profile[0]++;
689 	else if (length <= 256)
690 		l->stats.msg_length_profile[1]++;
691 	else if (length <= 1024)
692 		l->stats.msg_length_profile[2]++;
693 	else if (length <= 4096)
694 		l->stats.msg_length_profile[3]++;
695 	else if (length <= 16384)
696 		l->stats.msg_length_profile[4]++;
697 	else if (length <= 32768)
698 		l->stats.msg_length_profile[5]++;
699 	else
700 		l->stats.msg_length_profile[6]++;
701 }
702 
703 /* tipc_link_timeout - perform periodic task as instructed from node timeout
704  */
705 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
706 {
707 	int mtyp, rc = 0;
708 	bool state = false;
709 	bool probe = false;
710 	bool setup = false;
711 	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
712 	u16 bc_acked = l->bc_rcvlink->acked;
713 
714 	link_profile_stats(l);
715 
716 	switch (l->state) {
717 	case LINK_ESTABLISHED:
718 	case LINK_SYNCHING:
719 		if (l->silent_intv_cnt > l->abort_limit)
720 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
721 		mtyp = STATE_MSG;
722 		state = bc_acked != bc_snt;
723 		probe = l->silent_intv_cnt;
724 		l->silent_intv_cnt++;
725 		break;
726 	case LINK_RESET:
727 		setup = l->rst_cnt++ <= 4;
728 		setup |= !(l->rst_cnt % 16);
729 		mtyp = RESET_MSG;
730 		break;
731 	case LINK_ESTABLISHING:
732 		setup = true;
733 		mtyp = ACTIVATE_MSG;
734 		break;
735 	case LINK_PEER_RESET:
736 	case LINK_RESETTING:
737 	case LINK_FAILINGOVER:
738 		break;
739 	default:
740 		break;
741 	}
742 
743 	if (state || probe || setup)
744 		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq);
745 
746 	return rc;
747 }
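
/* Note: supervision works by counting silent intervals: silent_intv_cnt is
 * bumped on every timeout while the link is up and reset to zero on any
 * reception (see tipc_link_rcv()); a probe is sent whenever the previous
 * interval was silent, and the link is failed once the count exceeds
 * abort_limit.
 */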
748 
749 /**
750  * link_schedule_user - schedule a message sender for wakeup after congestion
751  * @link: congested link
752  * @list: message whose transmission was attempted
753  * Create pseudo msg to send back to user when congestion abates
754  * Does not consume buffer list
755  */
756 static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
757 {
758 	struct tipc_msg *msg = buf_msg(skb_peek(list));
759 	int imp = msg_importance(msg);
760 	u32 oport = msg_origport(msg);
761 	u32 addr = tipc_own_addr(link->net);
762 	struct sk_buff *skb;
763 
764 	/* This really cannot happen...  */
765 	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
766 		pr_warn("%s<%s>, send queue full\n", link_rst_msg, link->name);
767 		return -ENOBUFS;
768 	}
769 	/* Non-blocking sender: */
770 	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
771 		return -ELINKCONG;
772 
773 	/* Create and schedule wakeup pseudo message */
774 	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
775 			      addr, addr, oport, 0, 0);
776 	if (!skb)
777 		return -ENOBUFS;
778 	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
779 	TIPC_SKB_CB(skb)->chain_imp = imp;
780 	skb_queue_tail(&link->wakeupq, skb);
781 	link->stats.link_congs++;
782 	return -ELINKCONG;
783 }
784 
785 /**
786  * link_prepare_wakeup - prepare users for wakeup after congestion
787  * @l: congested link
788  * Move a number of waiting users, as permitted by available space in
789  * the send queue, from link wait queue to input queue for wakeup
790  */
791 void link_prepare_wakeup(struct tipc_link *l)
792 {
793 	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
794 	int imp, lim;
795 	struct sk_buff *skb, *tmp;
796 
797 	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
798 		imp = TIPC_SKB_CB(skb)->chain_imp;
799 		lim = l->window + l->backlog[imp].limit;
800 		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
801 		if ((pnd[imp] + l->backlog[imp].len) >= lim)
802 			break;
803 		skb_unlink(skb, &l->wakeupq);
804 		skb_queue_tail(l->inputq, skb);
805 	}
806 }
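
/* Note: the walk above is in FIFO order; pnd[] accumulates, per importance
 * level, how much has already been scheduled for wakeup, and the loop stops
 * at the first sender whose level would still exceed window + backlog limit.
 */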
807 
808 void tipc_link_reset(struct tipc_link *l)
809 {
810 	l->peer_session = ANY_SESSION;
811 	l->session++;
812 	l->mtu = l->advertised_mtu;
813 	__skb_queue_purge(&l->transmq);
814 	__skb_queue_purge(&l->deferdq);
815 	skb_queue_splice_init(&l->wakeupq, l->inputq);
816 	__skb_queue_purge(&l->backlogq);
817 	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
818 	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
819 	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
820 	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
821 	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
822 	kfree_skb(l->reasm_buf);
823 	kfree_skb(l->failover_reasm_skb);
824 	l->reasm_buf = NULL;
825 	l->failover_reasm_skb = NULL;
826 	l->rcv_unacked = 0;
827 	l->snd_nxt = 1;
828 	l->rcv_nxt = 1;
829 	l->acked = 0;
830 	l->silent_intv_cnt = 0;
831 	l->rst_cnt = 0;
832 	l->stats.recv_info = 0;
833 	l->stale_count = 0;
834 	l->bc_peer_is_up = false;
835 	tipc_link_reset_stats(l);
836 }
837 
838 /**
839  * tipc_link_xmit(): enqueue buffer list according to queue situation
840  * @l: link to use
841  * @list: chain of buffers containing message
842  * @xmitq: returned list of packets to be sent by caller
843  *
844  * Consumes the buffer chain, except when returning -ELINKCONG,
845  * since the caller then may want to make more send attempts.
846  * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
847  * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
848  */
849 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
850 		   struct sk_buff_head *xmitq)
851 {
852 	struct tipc_msg *hdr = buf_msg(skb_peek(list));
853 	unsigned int maxwin = l->window;
854 	unsigned int i, imp = msg_importance(hdr);
855 	unsigned int mtu = l->mtu;
856 	u16 ack = l->rcv_nxt - 1;
857 	u16 seqno = l->snd_nxt;
858 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
859 	struct sk_buff_head *transmq = &l->transmq;
860 	struct sk_buff_head *backlogq = &l->backlogq;
861 	struct sk_buff *skb, *_skb, *bskb;
862 
863 	/* Match msg importance against this and all higher backlog limits: */
864 	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
865 		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
866 			return link_schedule_user(l, list);
867 	}
868 	if (unlikely(msg_size(hdr) > mtu)) {
869 		skb_queue_purge(list);
870 		return -EMSGSIZE;
871 	}
872 
873 	/* Prepare each packet for sending, and add to relevant queue: */
874 	while (skb_queue_len(list)) {
875 		skb = skb_peek(list);
876 		hdr = buf_msg(skb);
877 		msg_set_seqno(hdr, seqno);
878 		msg_set_ack(hdr, ack);
879 		msg_set_bcast_ack(hdr, bc_ack);
880 
881 		if (likely(skb_queue_len(transmq) < maxwin)) {
882 			_skb = skb_clone(skb, GFP_ATOMIC);
883 			if (!_skb) {
884 				skb_queue_purge(list);
885 				return -ENOBUFS;
886 			}
887 			__skb_dequeue(list);
888 			__skb_queue_tail(transmq, skb);
889 			__skb_queue_tail(xmitq, _skb);
890 			TIPC_SKB_CB(skb)->ackers = l->ackers;
891 			l->rcv_unacked = 0;
892 			seqno++;
893 			continue;
894 		}
895 		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
896 			kfree_skb(__skb_dequeue(list));
897 			l->stats.sent_bundled++;
898 			continue;
899 		}
900 		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
901 			kfree_skb(__skb_dequeue(list));
902 			__skb_queue_tail(backlogq, bskb);
903 			l->backlog[msg_importance(buf_msg(bskb))].len++;
904 			l->stats.sent_bundled++;
905 			l->stats.sent_bundles++;
906 			continue;
907 		}
908 		l->backlog[imp].len += skb_queue_len(list);
909 		skb_queue_splice_tail_init(list, backlogq);
910 	}
911 	l->snd_nxt = seqno;
912 	return 0;
913 }
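
/* Note on the send path above: as long as the transmit queue is below the
 * send window, each packet is cloned - the original stays on transmq for
 * possible retransmission, the clone goes on xmitq for immediate sending.
 * Once the window is full, packets are instead bundled into the tail of the
 * backlog queue when they fit, or appended there as-is; backlogq is drained
 * into transmq later by tipc_link_advance_backlog().
 */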
914 
915 void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
916 {
917 	struct sk_buff *skb, *_skb;
918 	struct tipc_msg *hdr;
919 	u16 seqno = l->snd_nxt;
920 	u16 ack = l->rcv_nxt - 1;
921 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
922 
923 	while (skb_queue_len(&l->transmq) < l->window) {
924 		skb = skb_peek(&l->backlogq);
925 		if (!skb)
926 			break;
927 		_skb = skb_clone(skb, GFP_ATOMIC);
928 		if (!_skb)
929 			break;
930 		__skb_dequeue(&l->backlogq);
931 		hdr = buf_msg(skb);
932 		l->backlog[msg_importance(hdr)].len--;
933 		__skb_queue_tail(&l->transmq, skb);
934 		__skb_queue_tail(xmitq, _skb);
935 		TIPC_SKB_CB(skb)->ackers = l->ackers;
936 		msg_set_seqno(hdr, seqno);
937 		msg_set_ack(hdr, ack);
938 		msg_set_bcast_ack(hdr, bc_ack);
939 		l->rcv_unacked = 0;
940 		seqno++;
941 	}
942 	l->snd_nxt = seqno;
943 }
944 
945 static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
946 {
947 	struct tipc_msg *hdr = buf_msg(skb);
948 
949 	pr_warn("Retransmission failure on link <%s>\n", l->name);
950 	link_print(l, "Resetting link ");
951 	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
952 		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
953 	pr_info("sqno %u, prev: %x, src: %x\n",
954 		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
955 }
956 
957 int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
958 		      struct sk_buff_head *xmitq)
959 {
960 	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
961 	struct tipc_msg *hdr;
962 	u16 ack = l->rcv_nxt - 1;
963 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
964 
965 	if (!skb)
966 		return 0;
967 
968 	/* Detect repeated retransmit failures on same packet */
969 	if (likely(l->last_retransm != buf_seqno(skb))) {
970 		l->last_retransm = buf_seqno(skb);
971 		l->stale_count = 1;
972 	} else if (++l->stale_count > 100) {
973 		link_retransmit_failure(l, skb);
974 		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
975 	}
976 
977 	/* Move forward to where retransmission should start */
978 	skb_queue_walk(&l->transmq, skb) {
979 		if (!less(buf_seqno(skb), from))
980 			break;
981 	}
982 
983 	skb_queue_walk_from(&l->transmq, skb) {
984 		if (more(buf_seqno(skb), to))
985 			break;
986 		hdr = buf_msg(skb);
987 		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
988 		if (!_skb)
989 			return 0;
990 		hdr = buf_msg(_skb);
991 		msg_set_ack(hdr, ack);
992 		msg_set_bcast_ack(hdr, bc_ack);
993 		_skb->priority = TC_PRIO_CONTROL;
994 		__skb_queue_tail(xmitq, _skb);
995 		l->stats.retransmitted++;
996 	}
997 	return 0;
998 }
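
/* Note: sequence numbers are u16 and wrap, so all comparisons above go
 * through the modulo-2^16 helpers less()/more(). The stale_count logic takes
 * the link down if the packet at the head of transmq has been asked for
 * retransmission more than 100 consecutive times.
 */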
999 
1000 /* tipc_data_input - deliver data and name distr msgs to upper layer
1001  *
1002  * Consumes buffer if message is of right type
1003  * Node lock must be held
1004  */
1005 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
1006 			    struct sk_buff_head *inputq)
1007 {
1008 	switch (msg_user(buf_msg(skb))) {
1009 	case TIPC_LOW_IMPORTANCE:
1010 	case TIPC_MEDIUM_IMPORTANCE:
1011 	case TIPC_HIGH_IMPORTANCE:
1012 	case TIPC_CRITICAL_IMPORTANCE:
1013 	case CONN_MANAGER:
1014 		skb_queue_tail(inputq, skb);
1015 		return true;
1016 	case NAME_DISTRIBUTOR:
1017 		l->bc_rcvlink->state = LINK_ESTABLISHED;
1018 		skb_queue_tail(l->namedq, skb);
1019 		return true;
1020 	case MSG_BUNDLER:
1021 	case TUNNEL_PROTOCOL:
1022 	case MSG_FRAGMENTER:
1023 	case BCAST_PROTOCOL:
1024 		return false;
1025 	default:
1026 		pr_warn("Dropping received illegal msg type\n");
1027 		kfree_skb(skb);
1028 		return false;
1029 	}
1030 }
1031 
1032 /* tipc_link_input - process packet that has passed link protocol check
1033  *
1034  * Consumes buffer
1035  */
1036 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
1037 			   struct sk_buff_head *inputq)
1038 {
1039 	struct tipc_msg *hdr = buf_msg(skb);
1040 	struct sk_buff **reasm_skb = &l->reasm_buf;
1041 	struct sk_buff *iskb;
1042 	struct sk_buff_head tmpq;
1043 	int usr = msg_user(hdr);
1044 	int rc = 0;
1045 	int pos = 0;
1046 	int ipos = 0;
1047 
1048 	if (unlikely(usr == TUNNEL_PROTOCOL)) {
1049 		if (msg_type(hdr) == SYNCH_MSG) {
1050 			__skb_queue_purge(&l->deferdq);
1051 			goto drop;
1052 		}
1053 		if (!tipc_msg_extract(skb, &iskb, &ipos))
1054 			return rc;
1055 		kfree_skb(skb);
1056 		skb = iskb;
1057 		hdr = buf_msg(skb);
1058 		if (less(msg_seqno(hdr), l->drop_point))
1059 			goto drop;
1060 		if (tipc_data_input(l, skb, inputq))
1061 			return rc;
1062 		usr = msg_user(hdr);
1063 		reasm_skb = &l->failover_reasm_skb;
1064 	}
1065 
1066 	if (usr == MSG_BUNDLER) {
1067 		skb_queue_head_init(&tmpq);
1068 		l->stats.recv_bundles++;
1069 		l->stats.recv_bundled += msg_msgcnt(hdr);
1070 		while (tipc_msg_extract(skb, &iskb, &pos))
1071 			tipc_data_input(l, iskb, &tmpq);
1072 		tipc_skb_queue_splice_tail(&tmpq, inputq);
1073 		return 0;
1074 	} else if (usr == MSG_FRAGMENTER) {
1075 		l->stats.recv_fragments++;
1076 		if (tipc_buf_append(reasm_skb, &skb)) {
1077 			l->stats.recv_fragmented++;
1078 			tipc_data_input(l, skb, inputq);
1079 		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1080 			pr_warn_ratelimited("Unable to build fragment list\n");
1081 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1082 		}
1083 		return 0;
1084 	} else if (usr == BCAST_PROTOCOL) {
1085 		tipc_bcast_lock(l->net);
1086 		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
1087 		tipc_bcast_unlock(l->net);
1088 	}
1089 drop:
1090 	kfree_skb(skb);
1091 	return 0;
1092 }
1093 
1094 static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
1095 {
1096 	bool released = false;
1097 	struct sk_buff *skb, *tmp;
1098 
1099 	skb_queue_walk_safe(&l->transmq, skb, tmp) {
1100 		if (more(buf_seqno(skb), acked))
1101 			break;
1102 		__skb_unlink(skb, &l->transmq);
1103 		kfree_skb(skb);
1104 		released = true;
1105 	}
1106 	return released;
1107 }
1108 
1109 /* tipc_link_build_state_msg: prepare link state message for transmission
1110  *
1111  * Note that sending of broadcast ack is coordinated among nodes, to reduce
1112  * risk of ack storms towards the sender
1113  */
1114 int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1115 {
1116 	if (!l)
1117 		return 0;
1118 
1119 	/* Broadcast ACK must be sent via a unicast link => defer to caller */
1120 	if (link_is_bc_rcvlink(l)) {
1121 		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
1122 			return 0;
1123 		l->rcv_unacked = 0;
1124 		return TIPC_LINK_SND_BC_ACK;
1125 	}
1126 
1127 	/* Unicast ACK */
1128 	l->rcv_unacked = 0;
1129 	l->stats.sent_acks++;
1130 	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
1131 	return 0;
1132 }
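
/* Note: the (rcv_nxt ^ own_addr) & 0xf test above staggers broadcast acks:
 * a node only signals TIPC_LINK_SND_BC_ACK for one in every 16 sequence
 * numbers, and different nodes do so at different numbers, so receivers do
 * not all ack the same broadcast packet at the same time.
 */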
1133 
1134 /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1135  */
1136 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1137 {
1138 	int mtyp = RESET_MSG;
1139 	struct sk_buff *skb;
1140 
1141 	if (l->state == LINK_ESTABLISHING)
1142 		mtyp = ACTIVATE_MSG;
1143 
1144 	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);
1145 
1146 	/* Inform peer that this endpoint is going down if applicable */
1147 	skb = skb_peek_tail(xmitq);
1148 	if (skb && (l->state == LINK_RESET))
1149 		msg_set_peer_stopping(buf_msg(skb), 1);
1150 }
1151 
1152 /* tipc_link_build_nack_msg: prepare link nack message for transmission
1153  */
1154 static void tipc_link_build_nack_msg(struct tipc_link *l,
1155 				     struct sk_buff_head *xmitq)
1156 {
1157 	u32 def_cnt = ++l->stats.deferred_recv;
1158 
1159 	if (link_is_bc_rcvlink(l))
1160 		return;
1161 
1162 	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
1163 		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
1164 }
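
/* Note: a NACK is thus sent immediately when the first out-of-sequence
 * packet is deferred, and after that only once per TIPC_NACK_INTV deferrals
 * while the gap persists. Broadcast receive links never NACK from here; they
 * use tipc_link_build_bc_proto_msg() instead.
 */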
1165 
1166 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1167  * @l: the link that should handle the message
1168  * @skb: TIPC packet
1169  * @xmitq: queue to place packets to be sent after this call
1170  */
1171 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1172 		  struct sk_buff_head *xmitq)
1173 {
1174 	struct sk_buff_head *defq = &l->deferdq;
1175 	struct tipc_msg *hdr;
1176 	u16 seqno, rcv_nxt, win_lim;
1177 	int rc = 0;
1178 
1179 	do {
1180 		hdr = buf_msg(skb);
1181 		seqno = msg_seqno(hdr);
1182 		rcv_nxt = l->rcv_nxt;
1183 		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1184 
1185 		/* Verify and update link state */
1186 		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1187 			return tipc_link_proto_rcv(l, skb, xmitq);
1188 
1189 		if (unlikely(!link_is_up(l))) {
1190 			if (l->state == LINK_ESTABLISHING)
1191 				rc = TIPC_LINK_UP_EVT;
1192 			goto drop;
1193 		}
1194 
1195 		/* Don't send probe at next timeout expiration */
1196 		l->silent_intv_cnt = 0;
1197 
1198 		/* Drop if outside receive window */
1199 		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1200 			l->stats.duplicates++;
1201 			goto drop;
1202 		}
1203 
1204 		/* Forward queues and wake up waiting users */
1205 		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
1206 			tipc_link_advance_backlog(l, xmitq);
1207 			if (unlikely(!skb_queue_empty(&l->wakeupq)))
1208 				link_prepare_wakeup(l);
1209 		}
1210 
1211 		/* Defer delivery if sequence gap */
1212 		if (unlikely(seqno != rcv_nxt)) {
1213 			__tipc_skb_queue_sorted(defq, seqno, skb);
1214 			tipc_link_build_nack_msg(l, xmitq);
1215 			break;
1216 		}
1217 
1218 		/* Deliver packet */
1219 		l->rcv_nxt++;
1220 		l->stats.recv_info++;
1221 		if (!tipc_data_input(l, skb, l->inputq))
1222 			rc |= tipc_link_input(l, skb, l->inputq);
1223 		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1224 			rc |= tipc_link_build_state_msg(l, xmitq);
1225 		if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK))
1226 			break;
1227 	} while ((skb = __skb_dequeue(defq)));
1228 
1229 	return rc;
1230 drop:
1231 	kfree_skb(skb);
1232 	return rc;
1233 }
1234 
1235 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1236 				      u16 rcvgap, int tolerance, int priority,
1237 				      struct sk_buff_head *xmitq)
1238 {
1239 	struct sk_buff *skb;
1240 	struct tipc_msg *hdr;
1241 	struct sk_buff_head *dfq = &l->deferdq;
1242 	bool node_up = link_is_up(l->bc_rcvlink);
1243 
1244 	/* Don't send protocol message during reset or link failover */
1245 	if (tipc_link_is_blocked(l))
1246 		return;
1247 
1248 	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1249 		return;
1250 
1251 	if (!skb_queue_empty(dfq))
1252 		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1253 
1254 	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1255 			      TIPC_MAX_IF_NAME, l->addr,
1256 			      tipc_own_addr(l->net), 0, 0, 0);
1257 	if (!skb)
1258 		return;
1259 
1260 	hdr = buf_msg(skb);
1261 	msg_set_session(hdr, l->session);
1262 	msg_set_bearer_id(hdr, l->bearer_id);
1263 	msg_set_net_plane(hdr, l->net_plane);
1264 	msg_set_next_sent(hdr, l->snd_nxt);
1265 	msg_set_ack(hdr, l->rcv_nxt - 1);
1266 	msg_set_bcast_ack(hdr, l->bc_rcvlink->rcv_nxt - 1);
1267 	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1268 	msg_set_link_tolerance(hdr, tolerance);
1269 	msg_set_linkprio(hdr, priority);
1270 	msg_set_redundant_link(hdr, node_up);
1271 	msg_set_seq_gap(hdr, 0);
1272 	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
1273 
1274 	if (mtyp == STATE_MSG) {
1275 		msg_set_seq_gap(hdr, rcvgap);
1276 		msg_set_size(hdr, INT_H_SIZE);
1277 		msg_set_probe(hdr, probe);
1278 		l->stats.sent_states++;
1279 		l->rcv_unacked = 0;
1280 	} else {
1281 		/* RESET_MSG or ACTIVATE_MSG */
1282 		msg_set_max_pkt(hdr, l->advertised_mtu);
1283 		strcpy(msg_data(hdr), l->if_name);
1284 	}
1285 	if (probe)
1286 		l->stats.sent_probes++;
1287 	if (rcvgap)
1288 		l->stats.sent_nacks++;
1289 	skb->priority = TC_PRIO_CONTROL;
1290 	__skb_queue_tail(xmitq, skb);
1291 }
1292 
1293 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1294  * with contents of the link's transmit and backlog queues.
1295  */
1296 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1297 			   int mtyp, struct sk_buff_head *xmitq)
1298 {
1299 	struct sk_buff *skb, *tnlskb;
1300 	struct tipc_msg *hdr, tnlhdr;
1301 	struct sk_buff_head *queue = &l->transmq;
1302 	struct sk_buff_head tmpxq, tnlq;
1303 	u16 pktlen, pktcnt, seqno = l->snd_nxt;
1304 
1305 	if (!tnl)
1306 		return;
1307 
1308 	skb_queue_head_init(&tnlq);
1309 	skb_queue_head_init(&tmpxq);
1310 
1311 	/* At least one packet required for safe algorithm => add dummy */
1312 	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1313 			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1314 			      0, 0, TIPC_ERR_NO_PORT);
1315 	if (!skb) {
1316 		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1317 		return;
1318 	}
1319 	skb_queue_tail(&tnlq, skb);
1320 	tipc_link_xmit(l, &tnlq, &tmpxq);
1321 	__skb_queue_purge(&tmpxq);
1322 
1323 	/* Initialize reusable tunnel packet header */
1324 	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
1325 		      mtyp, INT_H_SIZE, l->addr);
1326 	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
1327 	msg_set_msgcnt(&tnlhdr, pktcnt);
1328 	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1329 tnl:
1330 	/* Wrap each packet into a tunnel packet */
1331 	skb_queue_walk(queue, skb) {
1332 		hdr = buf_msg(skb);
1333 		if (queue == &l->backlogq)
1334 			msg_set_seqno(hdr, seqno++);
1335 		pktlen = msg_size(hdr);
1336 		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1337 		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
1338 		if (!tnlskb) {
1339 			pr_warn("%sunable to send packet\n", link_co_err);
1340 			return;
1341 		}
1342 		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1343 		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1344 		__skb_queue_tail(&tnlq, tnlskb);
1345 	}
1346 	if (queue != &l->backlogq) {
1347 		queue = &l->backlogq;
1348 		goto tnl;
1349 	}
1350 
1351 	tipc_link_xmit(tnl, &tnlq, xmitq);
1352 
1353 	if (mtyp == FAILOVER_MSG) {
1354 		tnl->drop_point = l->rcv_nxt;
1355 		tnl->failover_reasm_skb = l->reasm_buf;
1356 		l->reasm_buf = NULL;
1357 	}
1358 }
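
/* Note: the routine above wraps every packet still sitting on the old link's
 * transmq and backlogq into TUNNEL_PROTOCOL messages and sends them over the
 * tunnel link; the initial dummy packet guarantees at least one tunnel
 * message even when both queues are empty. For FAILOVER_MSG the tunnel link
 * also takes over the old link's drop point and partially reassembled buffer.
 */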
1359 
1360 /* tipc_link_proto_rcv(): receive link level protocol message :
1361  * Note that network plane id propagates through the network, and may
1362  * change at any time. The node with lowest numerical id determines
1363  * network plane
1364  */
1365 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1366 			       struct sk_buff_head *xmitq)
1367 {
1368 	struct tipc_msg *hdr = buf_msg(skb);
1369 	u16 rcvgap = 0;
1370 	u16 ack = msg_ack(hdr);
1371 	u16 gap = msg_seq_gap(hdr);
1372 	u16 peers_snd_nxt =  msg_next_sent(hdr);
1373 	u16 peers_tol = msg_link_tolerance(hdr);
1374 	u16 peers_prio = msg_linkprio(hdr);
1375 	u16 rcv_nxt = l->rcv_nxt;
1376 	int mtyp = msg_type(hdr);
1377 	char *if_name;
1378 	int rc = 0;
1379 
1380 	if (tipc_link_is_blocked(l) || !xmitq)
1381 		goto exit;
1382 
1383 	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
1384 		l->net_plane = msg_net_plane(hdr);
1385 
1386 	switch (mtyp) {
1387 	case RESET_MSG:
1388 
1389 		/* Ignore duplicate RESET with old session number */
1390 		if ((less_eq(msg_session(hdr), l->peer_session)) &&
1391 		    (l->peer_session != ANY_SESSION))
1392 			break;
1393 		/* fall thru' */
1394 
1395 	case ACTIVATE_MSG:
1396 		skb_linearize(skb);
1397 		hdr = buf_msg(skb);
1398 
1399 		/* Complete own link name with peer's interface name */
1400 		if_name =  strrchr(l->name, ':') + 1;
1401 		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1402 			break;
1403 		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1404 			break;
1405 		strncpy(if_name, msg_data(hdr),	TIPC_MAX_IF_NAME);
1406 
1407 		/* Update own tolerance if peer indicates a non-zero value */
1408 		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1409 			l->tolerance = peers_tol;
1410 
1411 		/* Update own priority if peer's priority is higher */
1412 		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1413 			l->priority = peers_prio;
1414 
1415 		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1416 		if (msg_peer_stopping(hdr))
1417 			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1418 		else if ((mtyp == RESET_MSG) || !link_is_up(l))
1419 			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1420 
1421 		/* ACTIVATE_MSG takes up link if it was already locally reset */
1422 		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
1423 			rc = TIPC_LINK_UP_EVT;
1424 
1425 		l->peer_session = msg_session(hdr);
1426 		l->peer_bearer_id = msg_bearer_id(hdr);
1427 		if (l->mtu > msg_max_pkt(hdr))
1428 			l->mtu = msg_max_pkt(hdr);
1429 		break;
1430 
1431 	case STATE_MSG:
1432 
1433 		/* Update own tolerance if peer indicates a non-zero value */
1434 		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1435 			l->tolerance = peers_tol;
1436 
1437 		if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI,
1438 					   TIPC_MAX_LINK_PRI)) {
1439 			l->priority = peers_prio;
1440 			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1441 		}
1442 
1443 		l->silent_intv_cnt = 0;
1444 		l->stats.recv_states++;
1445 		if (msg_probe(hdr))
1446 			l->stats.recv_probes++;
1447 
1448 		if (!link_is_up(l)) {
1449 			if (l->state == LINK_ESTABLISHING)
1450 				rc = TIPC_LINK_UP_EVT;
1451 			break;
1452 		}
1453 
1454 		/* Send NACK if peer has sent pkts we haven't received yet */
1455 		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
1456 			rcvgap = peers_snd_nxt - l->rcv_nxt;
1457 		if (rcvgap || (msg_probe(hdr)))
1458 			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
1459 						  0, 0, xmitq);
1460 		tipc_link_release_pkts(l, ack);
1461 
1462 		/* If NACK, retransmit will now start at right position */
1463 		if (gap) {
1464 			rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
1465 			l->stats.recv_nacks++;
1466 		}
1467 
1468 		tipc_link_advance_backlog(l, xmitq);
1469 		if (unlikely(!skb_queue_empty(&l->wakeupq)))
1470 			link_prepare_wakeup(l);
1471 	}
1472 exit:
1473 	kfree_skb(skb);
1474 	return rc;
1475 }
1476 
1477 /* tipc_link_build_bc_proto_msg() - create broadcast protocol message
1478  */
1479 static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
1480 					 u16 peers_snd_nxt,
1481 					 struct sk_buff_head *xmitq)
1482 {
1483 	struct sk_buff *skb;
1484 	struct tipc_msg *hdr;
1485 	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
1486 	u16 ack = l->rcv_nxt - 1;
1487 	u16 gap_to = peers_snd_nxt - 1;
1488 
1489 	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
1490 			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
1491 	if (!skb)
1492 		return false;
1493 	hdr = buf_msg(skb);
1494 	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1495 	msg_set_bcast_ack(hdr, ack);
1496 	msg_set_bcgap_after(hdr, ack);
1497 	if (dfrd_skb)
1498 		gap_to = buf_seqno(dfrd_skb) - 1;
1499 	msg_set_bcgap_to(hdr, gap_to);
1500 	msg_set_non_seq(hdr, bcast);
1501 	__skb_queue_tail(xmitq, skb);
1502 	return true;
1503 }
1504 
1505 /* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
1506  *
1507  * Give a newly added peer node the sequence number where it should
1508  * start receiving and acking broadcast packets.
1509  */
1510 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
1511 					struct sk_buff_head *xmitq)
1512 {
1513 	struct sk_buff_head list;
1514 
1515 	__skb_queue_head_init(&list);
1516 	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
1517 		return;
1518 	tipc_link_xmit(l, &list, xmitq);
1519 }
1520 
1521 /* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
1522  */
1523 void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
1524 {
1525 	int mtyp = msg_type(hdr);
1526 	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1527 
1528 	if (link_is_up(l))
1529 		return;
1530 
1531 	if (msg_user(hdr) == BCAST_PROTOCOL) {
1532 		l->rcv_nxt = peers_snd_nxt;
1533 		l->state = LINK_ESTABLISHED;
1534 		return;
1535 	}
1536 
1537 	if (l->peer_caps & TIPC_BCAST_SYNCH)
1538 		return;
1539 
1540 	if (msg_peer_node_is_up(hdr))
1541 		return;
1542 
1543 	/* Compatibility: accept older, less safe initial synch data */
1544 	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
1545 		l->rcv_nxt = peers_snd_nxt;
1546 }
1547 
1548 /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
1549  */
1550 void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
1551 			   struct sk_buff_head *xmitq)
1552 {
1553 	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1554 
1555 	if (!link_is_up(l))
1556 		return;
1557 
1558 	if (!msg_peer_node_is_up(hdr))
1559 		return;
1560 
1561 	l->bc_peer_is_up = true;
1562 
1563 	/* Ignore if peers_snd_nxt goes beyond receive window */
1564 	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
1565 		return;
1566 
1567 	if (!more(peers_snd_nxt, l->rcv_nxt)) {
1568 		l->nack_state = BC_NACK_SND_CONDITIONAL;
1569 		return;
1570 	}
1571 
1572 	/* Don't NACK if one was recently sent or peeked */
1573 	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
1574 		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1575 		return;
1576 	}
1577 
1578 	/* Conditionally delay NACK sending until next synch rcv */
1579 	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
1580 		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1581 		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
1582 			return;
1583 	}
1584 
1585 	/* Send NACK now but suppress next one */
1586 	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
1587 	l->nack_state = BC_NACK_SND_SUPPRESS;
1588 }
1589 
1590 void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
1591 			  struct sk_buff_head *xmitq)
1592 {
1593 	struct sk_buff *skb, *tmp;
1594 	struct tipc_link *snd_l = l->bc_sndlink;
1595 
1596 	if (!link_is_up(l) || !l->bc_peer_is_up)
1597 		return;
1598 
1599 	if (!more(acked, l->acked))
1600 		return;
1601 
1602 	/* Skip over packets peer has already acked */
1603 	skb_queue_walk(&snd_l->transmq, skb) {
1604 		if (more(buf_seqno(skb), l->acked))
1605 			break;
1606 	}
1607 
1608 	/* Update/release the packets peer is acking now */
1609 	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
1610 		if (more(buf_seqno(skb), acked))
1611 			break;
1612 		if (!--TIPC_SKB_CB(skb)->ackers) {
1613 			__skb_unlink(skb, &snd_l->transmq);
1614 			kfree_skb(skb);
1615 		}
1616 	}
1617 	l->acked = acked;
1618 	tipc_link_advance_backlog(snd_l, xmitq);
1619 	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
1620 		link_prepare_wakeup(snd_l);
1621 }
1622 
1623 /* tipc_link_bc_nack_rcv(): receive broadcast nack message
1624  */
1625 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
1626 			  struct sk_buff_head *xmitq)
1627 {
1628 	struct tipc_msg *hdr = buf_msg(skb);
1629 	u32 dnode = msg_destnode(hdr);
1630 	int mtyp = msg_type(hdr);
1631 	u16 acked = msg_bcast_ack(hdr);
1632 	u16 from = acked + 1;
1633 	u16 to = msg_bcgap_to(hdr);
1634 	u16 peers_snd_nxt = to + 1;
1635 	int rc = 0;
1636 
1637 	kfree_skb(skb);
1638 
1639 	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
1640 		return 0;
1641 
1642 	if (mtyp != STATE_MSG)
1643 		return 0;
1644 
1645 	if (dnode == tipc_own_addr(l->net)) {
1646 		tipc_link_bc_ack_rcv(l, acked, xmitq);
1647 		rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
1648 		l->stats.recv_nacks++;
1649 		return rc;
1650 	}
1651 
1652 	/* Msg for other node => suppress own NACK at next sync if applicable */
1653 	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
1654 		l->nack_state = BC_NACK_SND_SUPPRESS;
1655 
1656 	return 0;
1657 }
1658 
1659 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
1660 {
1661 	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
1662 
1663 	l->window = win;
1664 	l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
1665 	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = win;
1666 	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
1667 	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
1668 	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
1669 }
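
/* Note: backlog congestion limits scale with the send window per importance
 * level (win/2, win, win/2*3 and win*2 for LOW through CRITICAL), while
 * SYSTEM importance is set to the number of packets needed to carry a full
 * bulk name table distribution (TIPC_MAX_PUBLICATIONS items) at current MTU.
 */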
1670 
1671 /**
1672  * tipc_link_reset_stats - reset link statistics
1673  * @l: pointer to link
1674  */
1675 void tipc_link_reset_stats(struct tipc_link *l)
1676 {
1677 	memset(&l->stats, 0, sizeof(l->stats));
1678 	if (!link_is_bc_sndlink(l)) {
1679 		l->stats.sent_info = l->snd_nxt;
1680 		l->stats.recv_info = l->rcv_nxt;
1681 	}
1682 }
1683 
1684 static void link_print(struct tipc_link *l, const char *str)
1685 {
1686 	struct sk_buff *hskb = skb_peek(&l->transmq);
1687 	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
1688 	u16 tail = l->snd_nxt - 1;
1689 
1690 	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
1691 	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
1692 		skb_queue_len(&l->transmq), head, tail,
1693 		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
1694 }
1695 
1696 /* Parse and validate nested (link) properties valid for media, bearer and link
1697  */
1698 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
1699 {
1700 	int err;
1701 
1702 	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
1703 			       tipc_nl_prop_policy);
1704 	if (err)
1705 		return err;
1706 
1707 	if (props[TIPC_NLA_PROP_PRIO]) {
1708 		u32 prio;
1709 
1710 		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1711 		if (prio > TIPC_MAX_LINK_PRI)
1712 			return -EINVAL;
1713 	}
1714 
1715 	if (props[TIPC_NLA_PROP_TOL]) {
1716 		u32 tol;
1717 
1718 		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1719 		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1720 			return -EINVAL;
1721 	}
1722 
1723 	if (props[TIPC_NLA_PROP_WIN]) {
1724 		u32 win;
1725 
1726 		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1727 		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
1728 			return -EINVAL;
1729 	}
1730 
1731 	return 0;
1732 }
1733 
1734 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
1735 {
1736 	int i;
1737 	struct nlattr *stats;
1738 
1739 	struct nla_map {
1740 		u32 key;
1741 		u32 val;
1742 	};
1743 
1744 	struct nla_map map[] = {
1745 		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
1746 		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
1747 		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
1748 		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
1749 		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
1750 		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
1751 		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
1752 		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
1753 		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
1754 		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
1755 		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
1756 			s->msg_length_counts : 1},
1757 		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
1758 		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
1759 		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
1760 		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
1761 		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
1762 		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
1763 		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
1764 		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
1765 		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
1766 		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
1767 		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
1768 		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
1769 		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
1770 		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
1771 		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
1772 		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
1773 		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
1774 		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
1775 		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
1776 		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
1777 		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
1778 		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
1779 			(s->accu_queue_sz / s->queue_sz_counts) : 0}
1780 	};
1781 
1782 	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
1783 	if (!stats)
1784 		return -EMSGSIZE;
1785 
1786 	for (i = 0; i <  ARRAY_SIZE(map); i++)
1787 		if (nla_put_u32(skb, map[i].key, map[i].val))
1788 			goto msg_full;
1789 
1790 	nla_nest_end(skb, stats);
1791 
1792 	return 0;
1793 msg_full:
1794 	nla_nest_cancel(skb, stats);
1795 
1796 	return -EMSGSIZE;
1797 }
1798 
1799 /* Caller should hold appropriate locks to protect the link */
1800 int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
1801 		       struct tipc_link *link, int nlflags)
1802 {
1803 	int err;
1804 	void *hdr;
1805 	struct nlattr *attrs;
1806 	struct nlattr *prop;
1807 	struct tipc_net *tn = net_generic(net, tipc_net_id);
1808 
1809 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1810 			  nlflags, TIPC_NL_LINK_GET);
1811 	if (!hdr)
1812 		return -EMSGSIZE;
1813 
1814 	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
1815 	if (!attrs)
1816 		goto msg_full;
1817 
1818 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
1819 		goto attr_msg_full;
1820 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
1821 			tipc_cluster_mask(tn->own_addr)))
1822 		goto attr_msg_full;
1823 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
1824 		goto attr_msg_full;
1825 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
1826 		goto attr_msg_full;
1827 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
1828 		goto attr_msg_full;
1829 
1830 	if (tipc_link_is_up(link))
1831 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
1832 			goto attr_msg_full;
1833 	if (link->active)
1834 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
1835 			goto attr_msg_full;
1836 
1837 	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
1838 	if (!prop)
1839 		goto attr_msg_full;
1840 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
1841 		goto prop_msg_full;
1842 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
1843 		goto prop_msg_full;
1844 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
1845 			link->window))
1846 		goto prop_msg_full;
1849 	nla_nest_end(msg->skb, prop);
1850 
1851 	err = __tipc_nl_add_stats(msg->skb, &link->stats);
1852 	if (err)
1853 		goto attr_msg_full;
1854 
1855 	nla_nest_end(msg->skb, attrs);
1856 	genlmsg_end(msg->skb, hdr);
1857 
1858 	return 0;
1859 
1860 prop_msg_full:
1861 	nla_nest_cancel(msg->skb, prop);
1862 attr_msg_full:
1863 	nla_nest_cancel(msg->skb, attrs);
1864 msg_full:
1865 	genlmsg_cancel(msg->skb, hdr);
1866 
1867 	return -EMSGSIZE;
1868 }
1869 
1870 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
1871 				      struct tipc_stats *stats)
1872 {
1873 	int i;
1874 	struct nlattr *nest;
1875 
1876 	struct nla_map {
1877 		__u32 key;
1878 		__u32 val;
1879 	};
1880 
1881 	struct nla_map map[] = {
1882 		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
1883 		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
1884 		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
1885 		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
1886 		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
1887 		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
1888 		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
1889 		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
1890 		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
1891 		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
1892 		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
1893 		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
1894 		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
1895 		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
1896 		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
1897 		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
1898 		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
1899 		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
1900 		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
1901 			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
1902 	};
1903 
1904 	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
1905 	if (!nest)
1906 		return -EMSGSIZE;
1907 
1908 	for (i = 0; i <  ARRAY_SIZE(map); i++)
1909 		if (nla_put_u32(skb, map[i].key, map[i].val))
1910 			goto msg_full;
1911 
1912 	nla_nest_end(skb, nest);
1913 
1914 	return 0;
1915 msg_full:
1916 	nla_nest_cancel(skb, nest);
1917 
1918 	return -EMSGSIZE;
1919 }
1920 
1921 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
1922 {
1923 	int err;
1924 	void *hdr;
1925 	struct nlattr *attrs;
1926 	struct nlattr *prop;
1927 	struct tipc_net *tn = net_generic(net, tipc_net_id);
1928 	struct tipc_link *bcl = tn->bcl;
1929 
1930 	if (!bcl)
1931 		return 0;
1932 
1933 	tipc_bcast_lock(net);
1934 
1935 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1936 			  NLM_F_MULTI, TIPC_NL_LINK_GET);
1937 	if (!hdr) {
1938 		tipc_bcast_unlock(net);
1939 		return -EMSGSIZE;
1940 	}
1941 
1942 	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
1943 	if (!attrs)
1944 		goto msg_full;
1945 
1946 	/* The broadcast link is always up */
1947 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
1948 		goto attr_msg_full;
1949 
1950 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
1951 		goto attr_msg_full;
1952 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
1953 		goto attr_msg_full;
1954 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
1955 		goto attr_msg_full;
1956 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
1957 		goto attr_msg_full;
1958 
1959 	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
1960 	if (!prop)
1961 		goto attr_msg_full;
1962 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
1963 		goto prop_msg_full;
1964 	nla_nest_end(msg->skb, prop);
1965 
1966 	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
1967 	if (err)
1968 		goto attr_msg_full;
1969 
1970 	tipc_bcast_unlock(net);
1971 	nla_nest_end(msg->skb, attrs);
1972 	genlmsg_end(msg->skb, hdr);
1973 
1974 	return 0;
1975 
1976 prop_msg_full:
1977 	nla_nest_cancel(msg->skb, prop);
1978 attr_msg_full:
1979 	nla_nest_cancel(msg->skb, attrs);
1980 msg_full:
1981 	tipc_bcast_unlock(net);
1982 	genlmsg_cancel(msg->skb, hdr);
1983 
1984 	return -EMSGSIZE;
1985 }
1986 
1987 void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
1988 			     struct sk_buff_head *xmitq)
1989 {
1990 	l->tolerance = tol;
1991 	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, tol, 0, xmitq);
1992 }
1993 
1994 void tipc_link_set_prio(struct tipc_link *l, u32 prio,
1995 			struct sk_buff_head *xmitq)
1996 {
1997 	l->priority = prio;
1998 	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, prio, xmitq);
1999 }
2000 
2001 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2002 {
2003 	l->abort_limit = limit;
2004 }
2005