xref: /linux/net/tipc/msg.c (revision 2c63221cd9e5c0dad0424029aeb1c40faada8330)
1 /*
2  * net/tipc/msg.c: TIPC message header routines
3  *
4  * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
5  * Copyright (c) 2005, 2010-2011, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include <net/sock.h>
38 #include "core.h"
39 #include "msg.h"
40 #include "addr.h"
41 #include "name_table.h"
42 
43 #define MAX_FORWARD_SIZE 1024
44 #define BUF_HEADROOM (LL_MAX_HEADER + 48)
45 #define BUF_TAILROOM 16
46 
47 static unsigned int align(unsigned int i)
48 {
49 	return (i + 3) & ~3u;
50 }
51 
52 /**
53  * tipc_buf_acquire - creates a TIPC message buffer
54  * @size: message size (including TIPC header)
55  *
56  * Returns a new buffer with data pointers set to the specified size.
57  *
58  * NOTE: Headroom is reserved to allow prepending of a data link header.
59  *       There may also be unrequested tailroom present at the buffer's end.
60  */
61 struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
62 {
63 	struct sk_buff *skb;
64 	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
65 
66 	skb = alloc_skb_fclone(buf_size, gfp);
67 	if (skb) {
68 		skb_reserve(skb, BUF_HEADROOM);
69 		skb_put(skb, size);
70 		skb->next = NULL;
71 	}
72 	return skb;
73 }
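
/*
 * Illustrative sketch, not part of the original file: what a caller of
 * tipc_buf_acquire() can rely on. The buffer carries BUF_HEADROOM bytes of
 * headroom for a later data link header, and its data length already equals
 * the requested size. The function name below is hypothetical.
 */
static struct sk_buff * __maybe_unused tipc_buf_acquire_example(u32 size)
{
	struct sk_buff *skb = tipc_buf_acquire(size, GFP_ATOMIC);

	if (!skb)
		return NULL;
	WARN_ON(skb_headroom(skb) < BUF_HEADROOM);	/* room for L2 header */
	WARN_ON(skb->len != size);			/* data area set up */
	return skb;
}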
74 
75 void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
76 		   u32 hsize, u32 dnode)
77 {
78 	memset(m, 0, hsize);
79 	msg_set_version(m);
80 	msg_set_user(m, user);
81 	msg_set_hdr_sz(m, hsize);
82 	msg_set_size(m, hsize);
83 	msg_set_prevnode(m, own_node);
84 	msg_set_type(m, type);
85 	if (hsize > SHORT_H_SIZE) {
86 		msg_set_orignode(m, own_node);
87 		msg_set_destnode(m, dnode);
88 	}
89 }
90 
91 struct sk_buff *tipc_msg_create(uint user, uint type,
92 				uint hdr_sz, uint data_sz, u32 dnode,
93 				u32 onode, u32 dport, u32 oport, int errcode)
94 {
95 	struct tipc_msg *msg;
96 	struct sk_buff *buf;
97 
98 	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
99 	if (unlikely(!buf))
100 		return NULL;
101 
102 	msg = buf_msg(buf);
103 	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
104 	msg_set_size(msg, hdr_sz + data_sz);
105 	msg_set_origport(msg, oport);
106 	msg_set_destport(msg, dport);
107 	msg_set_errcode(msg, errcode);
108 	if (hdr_sz > SHORT_H_SIZE) {
109 		msg_set_orignode(msg, onode);
110 		msg_set_destnode(msg, dnode);
111 	}
112 	return buf;
113 }
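
/*
 * Illustrative sketch, not part of the original file: creating a small,
 * self-contained control message. The user/type values are left to the
 * caller to keep the example generic; real callers pass constants such as
 * CONN_MANAGER or SOCK_WAKEUP from msg.h. The function name is hypothetical.
 */
static struct sk_buff * __maybe_unused tipc_ctrl_msg_example(u32 user, u32 type,
							     u32 dnode, u32 onode,
							     u32 dport, u32 oport)
{
	/* INT_H_SIZE header, no payload, no error code */
	return tipc_msg_create(user, type, INT_H_SIZE, 0,
			       dnode, onode, dport, oport, TIPC_OK);
}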
114 
115 /* tipc_buf_append(): Append a buffer to the fragment list of another buffer
116  * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
117  *            out: set when successful non-complete reassembly, otherwise NULL
118  * @*buf:     in:  the buffer to append. Always defined
119  *            out: head buf after successful complete reassembly, otherwise NULL
120  * Returns 1 when reassembly complete, otherwise 0
121  */
122 int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
123 {
124 	struct sk_buff *head = *headbuf;
125 	struct sk_buff *frag = *buf;
126 	struct sk_buff *tail = NULL;
127 	struct tipc_msg *msg;
128 	u32 fragid;
129 	int delta;
130 	bool headstolen;
131 
132 	if (!frag)
133 		goto err;
134 
135 	msg = buf_msg(frag);
136 	fragid = msg_type(msg);
137 	frag->next = NULL;
138 	skb_pull(frag, msg_hdr_sz(msg));
139 
140 	if (fragid == FIRST_FRAGMENT) {
141 		if (unlikely(head))
142 			goto err;
143 		if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
144 			goto err;
145 		head = *headbuf = frag;
146 		*buf = NULL;
147 		TIPC_SKB_CB(head)->tail = NULL;
148 		if (skb_is_nonlinear(head)) {
149 			skb_walk_frags(head, tail) {
150 				TIPC_SKB_CB(head)->tail = tail;
151 			}
152 		} else {
153 			skb_frag_list_init(head);
154 		}
155 		return 0;
156 	}
157 
158 	if (!head)
159 		goto err;
160 
161 	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
162 		kfree_skb_partial(frag, headstolen);
163 	} else {
164 		tail = TIPC_SKB_CB(head)->tail;
165 		if (!skb_has_frag_list(head))
166 			skb_shinfo(head)->frag_list = frag;
167 		else
168 			tail->next = frag;
169 		head->truesize += frag->truesize;
170 		head->data_len += frag->len;
171 		head->len += frag->len;
172 		TIPC_SKB_CB(head)->tail = frag;
173 	}
174 
175 	if (fragid == LAST_FRAGMENT) {
176 		TIPC_SKB_CB(head)->validated = false;
177 		if (unlikely(!tipc_msg_validate(&head)))
178 			goto err;
179 		*buf = head;
180 		TIPC_SKB_CB(head)->tail = NULL;
181 		*headbuf = NULL;
182 		return 1;
183 	}
184 	*buf = NULL;
185 	return 0;
186 err:
187 	kfree_skb(*buf);
188 	kfree_skb(*headbuf);
189 	*buf = *headbuf = NULL;
190 	return 0;
191 }
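
/*
 * Illustrative sketch, not part of the original file: driving
 * tipc_buf_append() from a receive loop. The head pointer carries the
 * reassembly state between calls; a return of 1 means the LAST_FRAGMENT
 * arrived and 'frag' now points to the validated, reassembled message.
 * The function name is hypothetical; compare with tipc_msg_assemble() below.
 */
static struct sk_buff * __maybe_unused tipc_reassemble_example(struct sk_buff_head *fragq)
{
	struct sk_buff *head = NULL;
	struct sk_buff *frag;

	while ((frag = __skb_dequeue(fragq))) {
		if (tipc_buf_append(&head, &frag))
			return frag;	/* complete message */
		if (!head)
			return NULL;	/* error: buffers already freed */
	}
	kfree_skb(head);		/* incomplete: drop partial message */
	return NULL;
}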
192 
193 /**
194  * tipc_msg_append(): Append data to tail of an existing buffer queue
195  * @_hdr: header to be used
196  * @m: the data to be appended
197  * @mss: max allowable size of buffer
198  * @dlen: size of data to be appended
199  * @txq: queue to append to
200  * Returns the number of 1k blocks appended or errno value
201  */
202 int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
203 		    int mss, struct sk_buff_head *txq)
204 {
205 	struct sk_buff *skb, *prev;
206 	int accounted, total, curr;
207 	int mlen, cpy, rem = dlen;
208 	struct tipc_msg *hdr;
209 
210 	skb = skb_peek_tail(txq);
211 	accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
212 	total = accounted;
213 
214 	while (rem) {
215 		if (!skb || skb->len >= mss) {
216 			prev = skb;
217 			skb = tipc_buf_acquire(mss, GFP_KERNEL);
218 			if (unlikely(!skb))
219 				return -ENOMEM;
220 			skb_orphan(skb);
221 			skb_trim(skb, MIN_H_SIZE);
222 			hdr = buf_msg(skb);
223 			skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
224 			msg_set_hdr_sz(hdr, MIN_H_SIZE);
225 			msg_set_size(hdr, MIN_H_SIZE);
226 			__skb_queue_tail(txq, skb);
227 			total += 1;
228 			if (prev)
229 				msg_set_ack_required(buf_msg(prev), 0);
230 			msg_set_ack_required(hdr, 1);
231 		}
232 		hdr = buf_msg(skb);
233 		curr = msg_blocks(hdr);
234 		mlen = msg_size(hdr);
235 		cpy = min_t(int, rem, mss - mlen);
236 		if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
237 			return -EFAULT;
238 		msg_set_size(hdr, mlen + cpy);
239 		skb_put(skb, cpy);
240 		rem -= cpy;
241 		total += msg_blocks(hdr) - curr;
242 	}
243 	return total - accounted;
244 }
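
/*
 * Illustrative sketch, not part of the original file: the return value of
 * tipc_msg_append() is the number of send-window blocks the new data added,
 * which a stream sender would typically charge against its congestion
 * window before transmitting the queue. The function name is hypothetical.
 */
static int __maybe_unused tipc_stream_append_example(struct tipc_msg *hdr,
						     struct msghdr *m, int dlen,
						     int mss, struct sk_buff_head *txq)
{
	int blocks = tipc_msg_append(hdr, m, dlen, mss, txq);

	if (blocks < 0)
		return blocks;		/* -ENOMEM or -EFAULT */
	/* a real caller would add 'blocks' to its link window accounting */
	return blocks;
}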
245 
246 /* tipc_msg_validate - validate basic format of received message
247  *
248  * This routine ensures a TIPC message has an acceptable header, and at least
249  * as much data as the header indicates it should.  The routine also ensures
250  * that the entire message header is stored in the main fragment of the message
251  * buffer, to simplify future access to message header fields.
252  *
253  * Note: Having extra info present in the message header or data areas is OK.
254  * TIPC will ignore the excess, under the assumption that it is optional info
255  * introduced by a later release of the protocol.
256  */
257 bool tipc_msg_validate(struct sk_buff **_skb)
258 {
259 	struct sk_buff *skb = *_skb;
260 	struct tipc_msg *hdr;
261 	int msz, hsz;
262 
263 	/* Ensure that flow control ratio condition is satisfied */
264 	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
265 		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
266 		if (!skb)
267 			return false;
268 		kfree_skb(*_skb);
269 		*_skb = skb;
270 	}
271 
272 	if (unlikely(TIPC_SKB_CB(skb)->validated))
273 		return true;
274 	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
275 		return false;
276 
277 	hsz = msg_hdr_sz(buf_msg(skb));
278 	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
279 		return false;
280 	if (unlikely(!pskb_may_pull(skb, hsz)))
281 		return false;
282 
283 	hdr = buf_msg(skb);
284 	if (unlikely(msg_version(hdr) != TIPC_VERSION))
285 		return false;
286 
287 	msz = msg_size(hdr);
288 	if (unlikely(msz < hsz))
289 		return false;
290 	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
291 		return false;
292 	if (unlikely(skb->len < msz))
293 		return false;
294 
295 	TIPC_SKB_CB(skb)->validated = true;
296 	return true;
297 }
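
/*
 * Illustrative sketch, not part of the original file: the usual call pattern
 * on a receive path. tipc_msg_validate() may replace the buffer, hence the
 * double pointer; on failure the caller still owns the remaining buffer and
 * must free it. The function name is hypothetical.
 */
static bool __maybe_unused tipc_rcv_validate_example(struct sk_buff **skb)
{
	if (likely(tipc_msg_validate(skb)))
		return true;
	kfree_skb(*skb);
	*skb = NULL;
	return false;
}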
298 
299 /**
300  * tipc_msg_fragment - build a fragment skb list for TIPC message
301  *
302  * @skb: TIPC message skb
303  * @hdr: internal msg header, prepended to each fragment
304  * @pktmax: max size of a fragment incl. the header
305  * @frags: returned fragment skb list
306  *
307  * Returns 0 if the fragmentation is successful, otherwise: -EINVAL
308  * or -ENOMEM
309  */
310 int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
311 		      int pktmax, struct sk_buff_head *frags)
312 {
313 	int pktno, nof_fragms, dsz, dmax, eat;
314 	struct tipc_msg *_hdr;
315 	struct sk_buff *_skb;
316 	u8 *data;
317 
318 	/* Non-linear buffer? */
319 	if (skb_linearize(skb))
320 		return -ENOMEM;
321 
322 	data = (u8 *)skb->data;
323 	dsz = msg_size(buf_msg(skb));
324 	dmax = pktmax - INT_H_SIZE;
325 	if (dsz <= dmax || !dmax)
326 		return -EINVAL;
327 
328 	nof_fragms = dsz / dmax + 1;
329 	for (pktno = 1; pktno <= nof_fragms; pktno++) {
330 		if (pktno < nof_fragms)
331 			eat = dmax;
332 		else
333 			eat = dsz % dmax;
334 		/* Allocate a new fragment */
335 		_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
336 		if (!_skb)
337 			goto error;
338 		skb_orphan(_skb);
339 		__skb_queue_tail(frags, _skb);
340 		/* Copy header & data to the fragment */
341 		skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
342 		skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
343 		data += eat;
344 		/* Update the fragment's header */
345 		_hdr = buf_msg(_skb);
346 		msg_set_fragm_no(_hdr, pktno);
347 		msg_set_nof_fragms(_hdr, nof_fragms);
348 		msg_set_size(_hdr, INT_H_SIZE + eat);
349 	}
350 	return 0;
351 
352 error:
353 	__skb_queue_purge(frags);
354 	__skb_queue_head_init(frags);
355 	return -ENOMEM;
356 }
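
/*
 * Worked example (illustrative, assuming INT_H_SIZE is 40 bytes): with
 * pktmax = 1500 the per-fragment payload is dmax = 1460. A message of
 * dsz = 3000 bytes therefore gives nof_fragms = 3000 / 1460 + 1 = 3, and
 * the fragments carry 1460, 1460 and 3000 % 1460 = 80 data bytes
 * respectively, each behind its own copy of the supplied fragment header.
 */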
357 
358 /**
359  * tipc_msg_build - create buffer chain containing specified header and data
360  * @mhdr: Message header, to be prepended to data
361  * @m: User message
362  * @dsz: Total length of user data
363  * @pktmax: Max packet size that can be used
364  * @list: Buffer or chain of buffers to be returned to caller
365  *
366  * Note that the recursive call we are making here is safe, since it can
367  * logically go only one further level down.
368  *
369  * Returns message data size or errno: -ENOMEM, -EFAULT
370  */
371 int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
372 		   int dsz, int pktmax, struct sk_buff_head *list)
373 {
374 	int mhsz = msg_hdr_sz(mhdr);
375 	struct tipc_msg pkthdr;
376 	int msz = mhsz + dsz;
377 	int pktrem = pktmax;
378 	struct sk_buff *skb;
379 	int drem = dsz;
380 	int pktno = 1;
381 	char *pktpos;
382 	int pktsz;
383 	int rc;
384 
385 	msg_set_size(mhdr, msz);
386 
387 	/* No fragmentation needed? */
388 	if (likely(msz <= pktmax)) {
389 		skb = tipc_buf_acquire(msz, GFP_KERNEL);
390 
391 		/* Fall back to smaller MTU if node local message */
392 		if (unlikely(!skb)) {
393 			if (pktmax != MAX_MSG_SIZE)
394 				return -ENOMEM;
395 			rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
396 			if (rc != dsz)
397 				return rc;
398 			if (tipc_msg_assemble(list))
399 				return dsz;
400 			return -ENOMEM;
401 		}
402 		skb_orphan(skb);
403 		__skb_queue_tail(list, skb);
404 		skb_copy_to_linear_data(skb, mhdr, mhsz);
405 		pktpos = skb->data + mhsz;
406 		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
407 			return dsz;
408 		rc = -EFAULT;
409 		goto error;
410 	}
411 
412 	/* Prepare reusable fragment header */
413 	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
414 		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
415 	msg_set_size(&pkthdr, pktmax);
416 	msg_set_fragm_no(&pkthdr, pktno);
417 	msg_set_importance(&pkthdr, msg_importance(mhdr));
418 
419 	/* Prepare first fragment */
420 	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
421 	if (!skb)
422 		return -ENOMEM;
423 	skb_orphan(skb);
424 	__skb_queue_tail(list, skb);
425 	pktpos = skb->data;
426 	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
427 	pktpos += INT_H_SIZE;
428 	pktrem -= INT_H_SIZE;
429 	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
430 	pktpos += mhsz;
431 	pktrem -= mhsz;
432 
433 	do {
434 		if (drem < pktrem)
435 			pktrem = drem;
436 
437 		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
438 			rc = -EFAULT;
439 			goto error;
440 		}
441 		drem -= pktrem;
442 
443 		if (!drem)
444 			break;
445 
446 		/* Prepare new fragment: */
447 		if (drem < (pktmax - INT_H_SIZE))
448 			pktsz = drem + INT_H_SIZE;
449 		else
450 			pktsz = pktmax;
451 		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
452 		if (!skb) {
453 			rc = -ENOMEM;
454 			goto error;
455 		}
456 		skb_orphan(skb);
457 		__skb_queue_tail(list, skb);
458 		msg_set_type(&pkthdr, FRAGMENT);
459 		msg_set_size(&pkthdr, pktsz);
460 		msg_set_fragm_no(&pkthdr, ++pktno);
461 		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
462 		pktpos = skb->data + INT_H_SIZE;
463 		pktrem = pktsz - INT_H_SIZE;
464 
465 	} while (1);
466 	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
467 	return dsz;
468 error:
469 	__skb_queue_purge(list);
470 	__skb_queue_head_init(list);
471 	return rc;
472 }
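
/*
 * Illustrative sketch, not part of the original file: a typical send-side
 * call, fragmenting user data against the link MTU into a local packet
 * queue. On success the return value equals the data length that was
 * packed. The function name is hypothetical.
 */
static int __maybe_unused tipc_send_build_example(struct tipc_msg *mhdr,
						  struct msghdr *m, int dlen,
						  int mtu)
{
	struct sk_buff_head pkts;
	int rc;

	__skb_queue_head_init(&pkts);
	rc = tipc_msg_build(mhdr, m, 0, dlen, mtu, &pkts);
	if (rc != dlen)
		return rc;		/* failed builds purge the list */
	/* a real caller would now hand 'pkts' to the link layer */
	__skb_queue_purge(&pkts);	/* sketch only: drop the packets */
	return 0;
}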
473 
474 /**
475  * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
476  * @bskb: the bundle buffer to append to
477  * @msg: message to be appended
478  * @max: max allowable size for the bundle buffer
479  *
480  * Returns "true" if bundling has been performed, otherwise "false"
481  */
482 static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
483 			    u32 max)
484 {
485 	struct tipc_msg *bmsg = buf_msg(bskb);
486 	u32 msz, bsz, offset, pad;
487 
488 	msz = msg_size(msg);
489 	bsz = msg_size(bmsg);
490 	offset = align(bsz);
491 	pad = offset - bsz;
492 
493 	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
494 		return false;
495 	if (unlikely(max < (offset + msz)))
496 		return false;
497 
498 	skb_put(bskb, pad + msz);
499 	skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
500 	msg_set_size(bmsg, offset + msz);
501 	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
502 	return true;
503 }
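
/*
 * Worked example (illustrative): if the bundle currently holds bsz = 70
 * bytes, the next inner message starts at offset = align(70) = 72, so
 * pad = 2 padding bytes precede it and the bundle grows to 72 plus the
 * size of the appended message.
 */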
504 
505 /**
506  * tipc_msg_try_bundle - Try to bundle a new message to the last one
507  * @tskb: the last/target message to which the new one will be appended
508  * @skb: the new message skb pointer
509  * @mss: max message size (header inclusive)
510  * @dnode: destination node for the message
511  * @new_bundle: if this call made a new bundle or not
512  *
513  * Return: "true" if the new message skb has bundling potential, now or
514  * later; if bundling was done this time, the skb is consumed and the skb
515  * pointer is set to NULL.
516  * Otherwise, "false" if the skb cannot be bundled at all.
517  */
518 bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
519 			 u32 dnode, bool *new_bundle)
520 {
521 	struct tipc_msg *msg, *inner, *outer;
522 	u32 tsz;
523 
524 	/* First, check if the new buffer is suitable for bundling */
525 	msg = buf_msg(*skb);
526 	if (msg_user(msg) == MSG_FRAGMENTER)
527 		return false;
528 	if (msg_user(msg) == TUNNEL_PROTOCOL)
529 		return false;
530 	if (msg_user(msg) == BCAST_PROTOCOL)
531 		return false;
532 	if (mss <= INT_H_SIZE + msg_size(msg))
533 		return false;
534 
535 	/* OK, but is there a last/target buffer to bundle with? */
536 	if (unlikely(!tskb))
537 		return true;
538 
539 	/* Is it a bundle already? Try to bundle the new message to it */
540 	if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
541 		*new_bundle = false;
542 		goto bundle;
543 	}
544 
545 	/* Make a new bundle of the two messages if possible */
546 	tsz = msg_size(buf_msg(tskb));
547 	if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg)))
548 		return true;
549 	if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
550 				      GFP_ATOMIC)))
551 		return true;
552 	inner = buf_msg(tskb);
553 	skb_push(tskb, INT_H_SIZE);
554 	outer = buf_msg(tskb);
555 	tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
556 		      dnode);
557 	msg_set_importance(outer, msg_importance(inner));
558 	msg_set_size(outer, INT_H_SIZE + tsz);
559 	msg_set_msgcnt(outer, 1);
560 	*new_bundle = true;
561 
562 bundle:
563 	if (likely(tipc_msg_bundle(tskb, msg, mss))) {
564 		consume_skb(*skb);
565 		*skb = NULL;
566 	}
567 	return true;
568 }
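
/*
 * Illustrative sketch, not part of the original file: how a transmit path
 * might use tipc_msg_try_bundle() against the last buffer already queued.
 * If the call consumes the new skb it was bundled and nothing more needs
 * queueing; otherwise the skb is queued as-is and may serve as a bundle
 * target for later messages. The function name is hypothetical.
 */
static void __maybe_unused tipc_xmit_bundle_example(struct sk_buff_head *backlogq,
						    struct sk_buff *skb, u32 mss,
						    u32 dnode)
{
	bool new_bundle = false;

	tipc_msg_try_bundle(skb_peek_tail(backlogq), &skb, mss, dnode,
			    &new_bundle);
	if (skb)
		__skb_queue_tail(backlogq, skb);
}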
569 
570 /**
571  *  tipc_msg_extract(): extract bundled inner packet from buffer
572  *  @skb: buffer to be extracted from.
573  *  @iskb: extracted inner buffer, to be returned
574  *  @pos: position in outer message of msg to be extracted.
575  *        Returns position of next msg
576  *  Consumes outer buffer when last packet extracted
577  *  Returns true when there is an extracted buffer, otherwise false
578  */
579 bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
580 {
581 	struct tipc_msg *hdr, *ihdr;
582 	int imsz;
583 
584 	*iskb = NULL;
585 	if (unlikely(skb_linearize(skb)))
586 		goto none;
587 
588 	hdr = buf_msg(skb);
589 	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
590 		goto none;
591 
592 	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
593 	imsz = msg_size(ihdr);
594 
595 	if ((*pos + imsz) > msg_data_sz(hdr))
596 		goto none;
597 
598 	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
599 	if (!*iskb)
600 		goto none;
601 
602 	skb_copy_to_linear_data(*iskb, ihdr, imsz);
603 	if (unlikely(!tipc_msg_validate(iskb)))
604 		goto none;
605 
606 	*pos += align(imsz);
607 	return true;
608 none:
609 	kfree_skb(skb);
610 	kfree_skb(*iskb);
611 	*iskb = NULL;
612 	return false;
613 }
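
/*
 * Illustrative sketch, not part of the original file: unpacking every inner
 * message from a MSG_BUNDLER buffer. The position cursor starts at zero and
 * is advanced by each extraction; the outer buffer is consumed by the final,
 * failing call. The deliver() callback and function name are hypothetical.
 */
static void __maybe_unused tipc_unbundle_example(struct sk_buff *bundle,
						 void (*deliver)(struct sk_buff *))
{
	struct sk_buff *iskb;
	int pos = 0;

	while (tipc_msg_extract(bundle, &iskb, &pos))
		deliver(iskb);
	/* 'bundle' has been freed by the last call; do not touch it here */
}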
614 
615 /**
616  * tipc_msg_reverse(): swap source and destination addresses and add error code
617  * @own_node: originating node id for reversed message
618  * @skb:  buffer containing message to be reversed; will be consumed
619  * @err:  error code to be set in message, if any
620  * Replaces consumed buffer with new one when successful
621  * Returns true if success, otherwise false
622  */
623 bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
624 {
625 	struct sk_buff *_skb = *skb;
626 	struct tipc_msg *_hdr, *hdr;
627 	int hlen, dlen;
628 
629 	if (skb_linearize(_skb))
630 		goto exit;
631 	_hdr = buf_msg(_skb);
632 	dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
633 	hlen = msg_hdr_sz(_hdr);
634 
635 	if (msg_dest_droppable(_hdr))
636 		goto exit;
637 	if (msg_errcode(_hdr))
638 		goto exit;
639 
640 	/* Never return SHORT header */
641 	if (hlen == SHORT_H_SIZE)
642 		hlen = BASIC_H_SIZE;
643 
644 	/* Don't return data along with SYN+; the sender has a clone */
645 	if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
646 		dlen = 0;
647 
648 	/* Allocate new buffer to return */
649 	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
650 	if (!*skb)
651 		goto exit;
652 	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
653 	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);
654 
655 	/* Build reverse header in new buffer */
656 	hdr = buf_msg(*skb);
657 	msg_set_hdr_sz(hdr, hlen);
658 	msg_set_errcode(hdr, err);
659 	msg_set_non_seq(hdr, 0);
660 	msg_set_origport(hdr, msg_destport(_hdr));
661 	msg_set_destport(hdr, msg_origport(_hdr));
662 	msg_set_destnode(hdr, msg_prevnode(_hdr));
663 	msg_set_prevnode(hdr, own_node);
664 	msg_set_orignode(hdr, own_node);
665 	msg_set_size(hdr, hlen + dlen);
666 	skb_orphan(_skb);
667 	kfree_skb(_skb);
668 	return true;
669 exit:
670 	kfree_skb(_skb);
671 	*skb = NULL;
672 	return false;
673 }
674 
675 bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
676 {
677 	struct sk_buff *skb, *_skb;
678 
679 	skb_queue_walk(msg, skb) {
680 		_skb = skb_clone(skb, GFP_ATOMIC);
681 		if (!_skb) {
682 			__skb_queue_purge(cpy);
683 			pr_err_ratelimited("Failed to clone buffer chain\n");
684 			return false;
685 		}
686 		__skb_queue_tail(cpy, _skb);
687 	}
688 	return true;
689 }
690 
691 /**
692  * tipc_msg_lookup_dest(): try to find new destination for named message
693  * @skb: the buffer containing the message.
694  * @err: error code to be used by caller if lookup fails
695  * Does not consume buffer
696  * Returns true if a destination is found, false otherwise
697  */
698 bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
699 {
700 	struct tipc_msg *msg = buf_msg(skb);
701 	u32 dport, dnode;
702 	u32 onode = tipc_own_addr(net);
703 
704 	if (!msg_isdata(msg))
705 		return false;
706 	if (!msg_named(msg))
707 		return false;
708 	if (msg_errcode(msg))
709 		return false;
710 	*err = TIPC_ERR_NO_NAME;
711 	if (skb_linearize(skb))
712 		return false;
713 	msg = buf_msg(skb);
714 	if (msg_reroute_cnt(msg))
715 		return false;
716 	dnode = tipc_scope2node(net, msg_lookup_scope(msg));
717 	dport = tipc_nametbl_translate(net, msg_nametype(msg),
718 				       msg_nameinst(msg), &dnode);
719 	if (!dport)
720 		return false;
721 	msg_incr_reroute_cnt(msg);
722 	if (dnode != onode)
723 		msg_set_prevnode(msg, onode);
724 	msg_set_destnode(msg, dnode);
725 	msg_set_destport(msg, dport);
726 	*err = TIPC_OK;
727 
728 	if (!skb_cloned(skb))
729 		return true;
730 
731 	return true;
732 }
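
/*
 * Illustrative sketch, not part of the original file: handling a named
 * message whose destination port has disappeared. If the lookup finds a new
 * destination the message is forwarded; otherwise it is bounced back to the
 * sender with the error code chosen by the caller or by the lookup itself.
 * The forward() callback and function name are hypothetical.
 */
static void __maybe_unused tipc_retry_named_example(struct net *net,
						    struct sk_buff *skb,
						    struct sk_buff_head *xmitq,
						    void (*forward)(struct net *,
								    struct sk_buff *))
{
	int err = TIPC_ERR_NO_PORT;

	if (tipc_msg_lookup_dest(net, skb, &err))
		forward(net, skb);
	else
		tipc_skb_reject(net, err, skb, xmitq);
}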
733 
734 /* tipc_msg_assemble() - assemble chain of fragments into one message
735  */
736 bool tipc_msg_assemble(struct sk_buff_head *list)
737 {
738 	struct sk_buff *skb, *tmp = NULL;
739 
740 	if (skb_queue_len(list) == 1)
741 		return true;
742 
743 	while ((skb = __skb_dequeue(list))) {
744 		skb->next = NULL;
745 		if (tipc_buf_append(&tmp, &skb)) {
746 			__skb_queue_tail(list, skb);
747 			return true;
748 		}
749 		if (!tmp)
750 			break;
751 	}
752 	__skb_queue_purge(list);
753 	__skb_queue_head_init(list);
754 	pr_warn("Failed do assemble buffer\n");
755 	return false;
756 }
757 
758 /* tipc_msg_reassemble() - clone a buffer chain of fragments and
759  *                         reassemble the clones into one message
760  */
761 bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
762 {
763 	struct sk_buff *skb, *_skb;
764 	struct sk_buff *frag = NULL;
765 	struct sk_buff *head = NULL;
766 	int hdr_len;
767 
768 	/* Copy header if single buffer */
769 	if (skb_queue_len(list) == 1) {
770 		skb = skb_peek(list);
771 		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
772 		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
773 		if (!_skb)
774 			return false;
775 		__skb_queue_tail(rcvq, _skb);
776 		return true;
777 	}
778 
779 	/* Clone all fragments and reassemble */
780 	skb_queue_walk(list, skb) {
781 		frag = skb_clone(skb, GFP_ATOMIC);
782 		if (!frag)
783 			goto error;
784 		frag->next = NULL;
785 		if (tipc_buf_append(&head, &frag))
786 			break;
787 		if (!head)
788 			goto error;
789 	}
790 	__skb_queue_tail(rcvq, frag);
791 	return true;
792 error:
793 	pr_warn("Failed do clone local mcast rcv buffer\n");
794 	kfree_skb(head);
795 	return false;
796 }
797 
798 bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
799 			struct sk_buff_head *cpy)
800 {
801 	struct sk_buff *skb, *_skb;
802 
803 	skb_queue_walk(msg, skb) {
804 		_skb = pskb_copy(skb, GFP_ATOMIC);
805 		if (!_skb) {
806 			__skb_queue_purge(cpy);
807 			return false;
808 		}
809 		msg_set_destnode(buf_msg(_skb), dst);
810 		__skb_queue_tail(cpy, _skb);
811 	}
812 	return true;
813 }
814 
815 /* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
816  * @list: list the buffer is sorted into
817  * @seqno: sequence number of buffer to add
818  * @skb: buffer to add
819  */
820 void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
821 			     struct sk_buff *skb)
822 {
823 	struct sk_buff *_skb, *tmp;
824 
825 	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
826 		__skb_queue_head(list, skb);
827 		return;
828 	}
829 
830 	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
831 		__skb_queue_tail(list, skb);
832 		return;
833 	}
834 
835 	skb_queue_walk_safe(list, _skb, tmp) {
836 		if (more(seqno, buf_seqno(_skb)))
837 			continue;
838 		if (seqno == buf_seqno(_skb))
839 			break;
840 		__skb_queue_before(list, _skb, skb);
841 		return;
842 	}
843 	kfree_skb(skb);
844 }
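
/*
 * Worked example (illustrative): if buffers arrive with sequence numbers
 * 5, 7, 6, the list ends up ordered 5, 6, 7. A later duplicate carrying
 * sequence number 6 is not inserted twice; it is freed instead.
 */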
845 
846 void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
847 		     struct sk_buff_head *xmitq)
848 {
849 	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
850 		__skb_queue_tail(xmitq, skb);
851 }
852