1 // SPDX-License-Identifier: GPL-2.0
2 /* xfrm_iptfs: IPTFS encapsulation support
3  *
4  * April 21 2022, Christian Hopps <chopps@labn.net>
5  *
6  * Copyright (c) 2022, LabN Consulting, L.L.C.
7  *
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/icmpv6.h>
12 #include <linux/skbuff_ref.h>
13 #include <net/gro.h>
14 #include <net/icmp.h>
15 #include <net/ip6_route.h>
16 #include <net/inet_ecn.h>
17 #include <net/xfrm.h>
18 
19 #include <crypto/aead.h>
20 
21 #include "xfrm_inout.h"
22 #include "trace_iptfs.h"
23 
24 /* IPTFS encap (header) values. */
25 #define IPTFS_SUBTYPE_BASIC 0
26 #define IPTFS_SUBTYPE_CC 1
27 
28 /* ----------------------------------------------- */
29 /* IP-TFS default SA values (tunnel egress/dir-in) */
30 /* ----------------------------------------------- */
31 
32 /**
33  * define IPTFS_DEFAULT_DROP_TIME_USECS - default drop time
34  *
35  * The default IPTFS drop time in microseconds. The drop time is the amount of
36  * time before a missing out-of-order IPTFS tunnel packet is considered lost.
37  * See also the reorder window.
38  *
39  * Default 1s.
40  */
41 #define IPTFS_DEFAULT_DROP_TIME_USECS 1000000
42 
43 /**
44  * define IPTFS_DEFAULT_REORDER_WINDOW - default reorder window size
45  *
46  * The default IPTFS reorder window size. The reorder window size dictates the
47  * maximum number of IPTFS tunnel packets in a sequence that may arrive out of
48  * order.
49  *
50  * Default 3 (as suggested by the TCP folks).
51  */
52 #define IPTFS_DEFAULT_REORDER_WINDOW 3
53 
54 /* ------------------------------------------------ */
55 /* IPTFS default SA values (tunnel ingress/dir-out) */
56 /* ------------------------------------------------ */
57 
58 /**
59  * define IPTFS_DEFAULT_INIT_DELAY_USECS - default initial output delay
60  *
61  * The initial output delay is the amount of time prior to servicing the output
62  * queue after queueing the first packet on said queue. This applies anytime the
63  * output queue was previously empty.
64  *
65  * Default 0.
66  */
67 #define IPTFS_DEFAULT_INIT_DELAY_USECS 0
68 
69 /**
70  * define IPTFS_DEFAULT_MAX_QUEUE_SIZE - default max output queue size.
71  *
72  * The default IPTFS max output queue size in octets. The output queue is where
73  * received packets destined for output over an IPTFS tunnel are stored prior to
74  * being output in aggregated/fragmented form over the IPTFS tunnel.
75  *
76  * Default 10M (1024 * 10240 octets, matching the define below).
77  */
78 #define IPTFS_DEFAULT_MAX_QUEUE_SIZE (1024 * 10240)
79 
80 /* Assumed: skb->head is cache aligned.
81  *
82  * L2 Header resv: Arrange for cacheline to start at skb->data - 16 to keep the
83  * to-be-pushed L2 header in the same cacheline as resulting `skb->data` (i.e.,
84  * the L3 header). If cacheline size is > 64 then skb->data + pushed L2 will all
85  * be in a single cacheline if we simply reserve 64 bytes.
86  *
87  * L3 Header resv: For L3+L2 headers (i.e., skb->data points at the IPTFS payload)
88  * we want `skb->data` to be cacheline aligned and all pushed L2L3 headers will
89  * be in their own cacheline[s]. 128 works for cachelines up to 128 bytes; for
90  * any larger cacheline sizes the pushed headers will simply share the cacheline
91  * with the start of the IPTFS payload (skb->data).
92  */
93 #define XFRM_IPTFS_MIN_L3HEADROOM 128
94 #define XFRM_IPTFS_MIN_L2HEADROOM (L1_CACHE_BYTES > 64 ? 64 : 64 + 16)
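
/* A worked example of the above (illustrative, assuming L1_CACHE_BYTES == 64):
 * the L2 reserve is 64 + 16 = 80, so after skb_reserve() skb->data sits at
 * offset 80 from the cache-aligned skb->head, and skb->data - 16 == 64 falls
 * exactly on a cacheline boundary. Pushing a 14-byte ethernet header then
 * places it at offset 66, in the same cacheline (64..127) as skb->data.
 */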
95 
96 /* Min to try to share outer iptfs skb data vs copying into new skb */
97 #define IPTFS_PKT_SHARE_MIN 129
98 
99 #define NSECS_IN_USEC 1000
100 
101 #define IPTFS_HRTIMER_MODE HRTIMER_MODE_REL_SOFT
102 
103 /**
104  * struct xfrm_iptfs_config - configuration for the IPTFS tunnel.
105  * @pkt_size: size of the outer IP packet. 0 to use interface and MTU discovery,
106  *	otherwise the user-specified value.
107  * @max_queue_size: The maximum number of octets allowed to be queued to be sent
108  *	over the IPTFS SA. The queue size is measured as the size of all the
109  *	packets enqueued.
110  * @reorder_win_size: the number of slots in the reorder window, thus the number of
111  *	packets that may arrive out of order.
112  * @dont_frag: true to inhibit fragmenting across IPTFS outer packets.
113  */
114 struct xfrm_iptfs_config {
115 	u32 pkt_size;	    /* outer_packet_size or 0 */
116 	u32 max_queue_size; /* octets */
117 	u16 reorder_win_size;
118 	u8 dont_frag : 1;
119 };
120 
121 struct skb_wseq {
122 	struct sk_buff *skb;
123 	u64 drop_time;
124 };
125 
126 /**
127  * struct xfrm_iptfs_data - mode specific xfrm state.
128  * @cfg: IPTFS tunnel config.
129  * @x: owning SA (xfrm_state).
130  * @queue: queued user packets to send.
131  * @queue_size: number of octets on queue (sum of packet sizes).
132  * @ecn_queue_size: octets above which to set the ECN CE mark.
133  * @init_delay_ns: nanoseconds to wait to send initial IPTFS packet.
134  * @iptfs_timer: output timer.
135  * @iptfs_settime: time the output timer was set.
136  * @payload_mtu: max payload size.
137  * @w_seq_set: true after first seq received.
138  * @w_wantseq: waiting for this seq number as next to process (in order).
139  * @w_saved: the saved buf array (reorder window).
140  * @w_savedlen: the saved len (not size).
141  * @drop_lock: lock to protect reorder queue.
142  * @drop_timer: timer for considering next packet lost.
143  * @drop_time_ns: timer interval in nanoseconds.
144  * @ra_newskb: new pkt being reassembled.
145  * @ra_wantseq: expected next sequence for reassembly.
146  * @ra_runt: last pkt bytes from very end of last skb.
147  * @ra_runtlen: size of ra_runt.
148  */
149 struct xfrm_iptfs_data {
150 	struct xfrm_iptfs_config cfg;
151 
152 	/* Ingress User Input */
153 	struct xfrm_state *x;	   /* owning state */
154 	struct sk_buff_head queue; /* output queue */
155 
156 	u32 queue_size;		    /* octets */
157 	u32 ecn_queue_size;	    /* octets above which ECN mark */
158 	u64 init_delay_ns;	    /* nanoseconds */
159 	struct hrtimer iptfs_timer; /* output timer */
160 	time64_t iptfs_settime;	    /* time timer was set */
161 	u32 payload_mtu;	    /* max payload size */
162 
163 	/* Tunnel input reordering */
164 	bool w_seq_set;		  /* true after first seq received */
165 	u64 w_wantseq;		  /* expected next sequence */
166 	struct skb_wseq *w_saved; /* the saved buf array */
167 	u32 w_savedlen;		  /* the saved len (not size) */
168 	spinlock_t drop_lock;
169 	struct hrtimer drop_timer;
170 	u64 drop_time_ns;
171 
172 	/* Tunnel input reassembly */
173 	struct sk_buff *ra_newskb; /* new pkt being reassembled */
174 	u64 ra_wantseq;		   /* expected next sequence */
175 	u8 ra_runt[6];		   /* last pkt bytes from last skb */
176 	u8 ra_runtlen;		   /* count of ra_runt */
177 };
178 
179 static u32 __iptfs_get_inner_mtu(struct xfrm_state *x, int outer_mtu);
180 static enum hrtimer_restart iptfs_delay_timer(struct hrtimer *me);
181 static enum hrtimer_restart iptfs_drop_timer(struct hrtimer *me);
182 
183 /* ================= */
184 /* Utility Functions */
185 /* ================= */
186 
187 #ifdef TRACEPOINTS_ENABLED
188 static u32 __trace_ip_proto(struct iphdr *iph)
189 {
190 	if (iph->version == 4)
191 		return iph->protocol;
192 	return ((struct ipv6hdr *)iph)->nexthdr;
193 }
194 
195 static u32 __trace_ip_proto_seq(struct iphdr *iph)
196 {
197 	void *nexthdr;
198 	u32 protocol = 0;
199 
200 	if (iph->version == 4) {
201 		nexthdr = (void *)(iph + 1);
202 		protocol = iph->protocol;
203 	} else if (iph->version == 6) {
204 		nexthdr = (void *)(((struct ipv6hdr *)(iph)) + 1);
205 		protocol = ((struct ipv6hdr *)(iph))->nexthdr;
206 	}
207 	switch (protocol) {
208 	case IPPROTO_ICMP:
209 		return ntohs(((struct icmphdr *)nexthdr)->un.echo.sequence);
210 	case IPPROTO_ICMPV6:
211 		return ntohs(((struct icmp6hdr *)nexthdr)->icmp6_sequence);
212 	case IPPROTO_TCP:
213 		return ntohl(((struct tcphdr *)nexthdr)->seq);
214 	case IPPROTO_UDP:
215 		return ntohs(((struct udphdr *)nexthdr)->source);
216 	default:
217 		return 0;
218 	}
219 }
220 #endif /*TRACEPOINTS_ENABLED*/
221 
222 static u64 __esp_seq(struct sk_buff *skb)
223 {
224 	u64 seq = ntohl(XFRM_SKB_CB(skb)->seq.input.low);
225 
226 	return seq | (u64)ntohl(XFRM_SKB_CB(skb)->seq.input.hi) << 32;
227 }
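
/* For example (illustrative values): with 64-bit ESNs in use and
 * seq.input.hi == htonl(1), seq.input.low == htonl(2), __esp_seq()
 * returns 0x100000002.
 */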
228 
229 /* ======================= */
230 /* IPTFS SK_BUFF Functions */
231 /* ======================= */
232 
233 /**
234  * iptfs_alloc_skb() - Allocate a new `skb`.
235  * @tpl: the skb to copy required meta-data from.
236  * @len: the linear length of the head data, zero is fine.
237  * @l3resv: true if skb reserve needs to support pushing L3 headers
238  *
239  * A new `skb` is allocated and required meta-data is copied from `tpl`, the
240  * head data is sized to `len` + reserved space set according to the @l3resv
241  * boolean.
242  *
243  * When @l3resv is false, resv is XFRM_IPTFS_MIN_L2HEADROOM, which arranges
244  * for `skb->data - 16` to be a good guess at cacheline alignment (placing
245  * the to-be-pushed L2 header at the start of a cacheline).
246  *
247  * Otherwise, @l3resv is true and resv is set to the correct reserved space for
248  * dst->dev plus the calculated L3 overhead for the xfrm dst or
249  * XFRM_IPTFS_MIN_L3HEADROOM whichever is larger. This is then cache aligned so
250  * that all the headers will commonly fall in a cacheline when possible.
251  *
252  * l3resv=true is used on tunnel ingress (tx), because we need to reserve for
253  * the new IPTFS packet (i.e., L2+L3 headers). On tunnel egress (rx) the data
254  * being copied into the skb includes the user L3 headers already so we only
255  * need to reserve for L2.
256  *
257  * Return: the new skb or NULL.
258  */
259 static struct sk_buff *iptfs_alloc_skb(struct sk_buff *tpl, u32 len, bool l3resv)
260 {
261 	struct sk_buff *skb;
262 	u32 resv;
263 
264 	if (!l3resv) {
265 		resv = XFRM_IPTFS_MIN_L2HEADROOM;
266 	} else {
267 		struct dst_entry *dst = skb_dst(tpl);
268 
269 		resv = LL_RESERVED_SPACE(dst->dev) + dst->header_len;
270 		resv = max(resv, XFRM_IPTFS_MIN_L3HEADROOM);
271 		resv = L1_CACHE_ALIGN(resv);
272 	}
273 
274 	skb = alloc_skb(len + resv, GFP_ATOMIC | __GFP_NOWARN);
275 	if (!skb)
276 		return NULL;
277 
278 	skb_reserve(skb, resv);
279 
280 	if (!l3resv) {
281 		/* xfrm_input resume needs dev and xfrm ext from tunnel pkt */
282 		skb->dev = tpl->dev;
283 		__skb_ext_copy(skb, tpl);
284 	}
285 
286 	/* dropped by xfrm_input, used by xfrm_output */
287 	skb_dst_copy(skb, tpl);
288 
289 	return skb;
290 }
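
/* A minimal usage sketch for the rx direction (illustrative only; `payload`
 * and `inner_len` are hypothetical names for the inner packet data and its
 * length parsed from the IPTFS payload):
 *
 *	skb = iptfs_alloc_skb(outer_skb, inner_len, false);
 *	if (!skb)
 *		return NULL;
 *	memcpy(skb_put(skb, inner_len), payload, inner_len);
 *
 * The tx path instead passes l3resv=true so that the new IPTFS packet's
 * L2+L3 headers can later be pushed in front of skb->data.
 */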
291 
292 /**
293  * iptfs_skb_head_to_frag() - initialize a skb_frag_t based on skb head data
294  * @skb: skb with the head data
295  * @frag: frag to initialize
296  */
297 static void iptfs_skb_head_to_frag(const struct sk_buff *skb, skb_frag_t *frag)
298 {
299 	struct page *page = virt_to_head_page(skb->data);
300 	unsigned char *addr = (unsigned char *)page_address(page);
301 
302 	skb_frag_fill_page_desc(frag, page, skb->data - addr, skb_headlen(skb));
303 }
304 
305 /**
306  * struct iptfs_skb_frag_walk - use to track a walk through fragments
307  * @fragi: current fragment index
308  * @past: length of data in fragments before @fragi
309  * @total: length of data in all fragments
310  * @nr_frags: number of fragments present in array
311  * @initial_offset: the value passed in to skb_prepare_frag_walk()
312  * @frags: the page fragments inc. room for head page
313  * @pp_recycle: copy of skb->pp_recycle
314  */
315 struct iptfs_skb_frag_walk {
316 	u32 fragi;
317 	u32 past;
318 	u32 total;
319 	u32 nr_frags;
320 	u32 initial_offset;
321 	skb_frag_t frags[MAX_SKB_FRAGS + 1];
322 	bool pp_recycle;
323 };
324 
325 /**
326  * iptfs_skb_prepare_frag_walk() - initialize a frag walk over an skb.
327  * @skb: the skb to walk.
328  * @initial_offset: start the walk @initial_offset into the skb.
329  * @walk: the walk to initialize
330  *
331  * Future calls to iptfs_skb_add_frags() will expect the @offset value to be
332  * at least as large as @initial_offset.
333  */
334 static void iptfs_skb_prepare_frag_walk(struct sk_buff *skb, u32 initial_offset,
335 					struct iptfs_skb_frag_walk *walk)
336 {
337 	struct skb_shared_info *shinfo = skb_shinfo(skb);
338 	skb_frag_t *frag, *from;
339 	u32 i;
340 
341 	walk->initial_offset = initial_offset;
342 	walk->fragi = 0;
343 	walk->past = 0;
344 	walk->total = 0;
345 	walk->nr_frags = 0;
346 	walk->pp_recycle = skb->pp_recycle;
347 
348 	if (skb->head_frag) {
349 		if (initial_offset >= skb_headlen(skb)) {
350 			initial_offset -= skb_headlen(skb);
351 		} else {
352 			frag = &walk->frags[walk->nr_frags++];
353 			iptfs_skb_head_to_frag(skb, frag);
354 			frag->offset += initial_offset;
355 			frag->len -= initial_offset;
356 			walk->total += frag->len;
357 			initial_offset = 0;
358 		}
359 	} else {
360 		initial_offset -= skb_headlen(skb);
361 	}
362 
363 	for (i = 0; i < shinfo->nr_frags; i++) {
364 		from = &shinfo->frags[i];
365 		if (initial_offset >= from->len) {
366 			initial_offset -= from->len;
367 			continue;
368 		}
369 		frag = &walk->frags[walk->nr_frags++];
370 		*frag = *from;
371 		if (initial_offset) {
372 			frag->offset += initial_offset;
373 			frag->len -= initial_offset;
374 			initial_offset = 0;
375 		}
376 		walk->total += frag->len;
377 	}
378 }
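
/* For example (illustrative): given an skb with 100 bytes of head-frag data
 * and two page frags of 1000 bytes each, preparing a walk with
 * initial_offset == 40 yields three walk frags of lengths 60, 1000, and 1000
 * (walk->total == 2060), the first with its page offset advanced by 40.
 */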
379 
380 static u32 iptfs_skb_reset_frag_walk(struct iptfs_skb_frag_walk *walk,
381 				     u32 offset)
382 {
383 	/* Adjust offset to refer to internal walk values */
384 	offset -= walk->initial_offset;
385 
386 	/* Get to the correct fragment for offset */
387 	while (offset < walk->past) {
388 		walk->past -= walk->frags[--walk->fragi].len;
389 		if (offset >= walk->past)
390 			break;
391 	}
392 	while (offset >= walk->past + walk->frags[walk->fragi].len)
393 		walk->past += walk->frags[walk->fragi++].len;
394 
395 	/* offset now relative to this current frag */
396 	offset -= walk->past;
397 	return offset;
398 }
399 
400 /**
401  * iptfs_skb_can_add_frags() - check if ok to add frags from walk to skb
402  * @skb: skb to check for adding frags to
403  * @walk: the walk that will be used as source for frags.
404  * @offset: offset from beginning of original skb to start from.
405  * @len: amount of data to add frag references to in @skb.
406  *
407  * Return: true if ok to add frags.
408  */
409 static bool iptfs_skb_can_add_frags(const struct sk_buff *skb,
410 				    struct iptfs_skb_frag_walk *walk,
411 				    u32 offset, u32 len)
412 {
413 	struct skb_shared_info *shinfo = skb_shinfo(skb);
414 	u32 fragi, nr_frags, fraglen;
415 
416 	if (skb_has_frag_list(skb) || skb->pp_recycle != walk->pp_recycle)
417 		return false;
418 
419 	/* Make offset relative to current frag after setting that */
420 	offset = iptfs_skb_reset_frag_walk(walk, offset);
421 
422 	/* Verify we have array space for the fragments we need to add */
423 	fragi = walk->fragi;
424 	nr_frags = shinfo->nr_frags;
425 	while (len && fragi < walk->nr_frags) {
426 		skb_frag_t *frag = &walk->frags[fragi];
427 
428 		fraglen = frag->len;
429 		if (offset) {
430 			fraglen -= offset;
431 			offset = 0;
432 		}
433 		if (++nr_frags > MAX_SKB_FRAGS)
434 			return false;
435 		if (len <= fraglen)
436 			return true;
437 		len -= fraglen;
438 		fragi++;
439 	}
440 	/* We may not copy all @len but what we have will fit. */
441 	return true;
442 }
443 
444 /**
445  * iptfs_skb_add_frags() - add a range of fragment references into an skb
446  * @skb: skb to add references into
447  * @walk: the walk to add referenced fragments from.
448  * @offset: offset from beginning of original skb to start from.
449  * @len: amount of data to add frag references to in @skb.
450  *
451  * iptfs_skb_can_add_frags() should be called before this function to verify
452  * that the destination @skb is compatible with the walk and has space in the
453  * array for the to be added frag references.
454  *
455  * Return: The number of bytes not added to @skb b/c we reached the end of the
456  * walk before adding all of @len.
457  */
458 static int iptfs_skb_add_frags(struct sk_buff *skb,
459 			       struct iptfs_skb_frag_walk *walk, u32 offset,
460 			       u32 len)
461 {
462 	struct skb_shared_info *shinfo = skb_shinfo(skb);
463 	u32 fraglen;
464 
465 	if (!walk->nr_frags || offset >= walk->total + walk->initial_offset)
466 		return len;
467 
468 	/* make offset relative to current frag after setting that */
469 	offset = iptfs_skb_reset_frag_walk(walk, offset);
470 
471 	while (len && walk->fragi < walk->nr_frags) {
472 		skb_frag_t *frag = &walk->frags[walk->fragi];
473 		skb_frag_t *tofrag = &shinfo->frags[shinfo->nr_frags];
474 
475 		*tofrag = *frag;
476 		if (offset) {
477 			tofrag->offset += offset;
478 			tofrag->len -= offset;
479 			offset = 0;
480 		}
481 		__skb_frag_ref(tofrag);
482 		shinfo->nr_frags++;
483 
484 		/* see if we are done */
485 		fraglen = tofrag->len;
486 		if (len < fraglen) {
487 			tofrag->len = len;
488 			skb->len += len;
489 			skb->data_len += len;
490 			return 0;
491 		}
492 		/* advance to next source fragment */
493 		len -= fraglen;			/* careful, use dst bv_len */
494 		skb->len += fraglen;		/* careful, "   "    "     */
495 		skb->data_len += fraglen;	/* careful, "   "    "     */
496 		walk->past += frag->len;	/* careful, use src bv_len */
497 		walk->fragi++;
498 	}
499 	return len;
500 }
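
/* The two functions above are used as a check/commit pair, roughly (sketch):
 *
 *	if (fragwalk && iptfs_skb_can_add_frags(skb, fragwalk, off, len))
 *		iptfs_skb_add_frags(skb, fragwalk, off, len);
 *	else
 *		fall back to skb_linearize() and skb_copy_seq_read()
 *
 * as done in iptfs_reassem_cont() and __input_process_payload() below.
 */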
501 
502 /* ================================== */
503 /* IPTFS Trace Event Definitions      */
504 /* ================================== */
505 
506 #define CREATE_TRACE_POINTS
507 #include "trace_iptfs.h"
508 
509 /* ================================== */
510 /* IPTFS Receiving (egress) Functions */
511 /* ================================== */
512 
513 /**
514  * iptfs_pskb_add_frags() - Create and add frags into a new sk_buff.
515  * @tpl: template to create new skb from.
516  * @walk: The source for fragments to add.
517  * @off: The offset into @walk to add frags from, also used with @st and
518  *       @copy_len.
519  * @len: The length of data to add covering frags from @walk into the new
520  *       skb. This must be >= @copy_len.
521  * @st: The sequence state to copy from into the new head skb.
522  * @copy_len: Copy @copy_len bytes from @st at offset @off into the new skb
523  *            linear space.
524  *
525  * Create a new sk_buff `skb` using the template @tpl. Copy @copy_len bytes from
526  * @st into the new skb linear space, and then add shared fragments from the
527  * frag walk for the remaining @len of data (i.e., @len - @copy_len bytes).
528  *
529  * Return: The newly allocated sk_buff `skb` or NULL if an error occurs.
530  */
531 static struct sk_buff *
532 iptfs_pskb_add_frags(struct sk_buff *tpl, struct iptfs_skb_frag_walk *walk,
533 		     u32 off, u32 len, struct skb_seq_state *st, u32 copy_len)
534 {
535 	struct sk_buff *skb;
536 
537 	skb = iptfs_alloc_skb(tpl, copy_len, false);
538 	if (!skb)
539 		return NULL;
540 
541 	/* this should not normally happen */
542 	if (!iptfs_skb_can_add_frags(skb, walk, off + copy_len,
543 				     len - copy_len)) {
544 		kfree_skb(skb);
545 		return NULL;
546 	}
547 
548 	if (copy_len &&
549 	    skb_copy_seq_read(st, off, skb_put(skb, copy_len), copy_len)) {
550 		XFRM_INC_STATS(dev_net(st->root_skb->dev),
551 			       LINUX_MIB_XFRMINERROR);
552 		kfree_skb(skb);
553 		return NULL;
554 	}
555 
556 	iptfs_skb_add_frags(skb, walk, off + copy_len, len - copy_len);
557 	return skb;
558 }
559 
560 /**
561  * iptfs_pskb_extract_seq() - Create and load data into a new sk_buff.
562  * @skblen: the total data size for `skb`.
563  * @st: The source for the rest of the data to copy into `skb`.
564  * @off: The offset into @st to copy data from.
565  * @len: The length of data to copy from @st into `skb`. This must be <=
566  *       @skblen.
567  *
568  * Create a new sk_buff `skb` with @skblen of packet data space. Then, using
569  * seq functions, copy @len bytes from @st into `skb` starting from @off.
571  *
572  * It is an error for @len to be greater than the amount of data left in @st.
573  *
574  * Return: The newly allocated sk_buff `skb` or NULL if an error occurs.
575  */
576 static struct sk_buff *
577 iptfs_pskb_extract_seq(u32 skblen, struct skb_seq_state *st, u32 off, int len)
578 {
579 	struct sk_buff *skb = iptfs_alloc_skb(st->root_skb, skblen, false);
580 
581 	if (!skb)
582 		return NULL;
583 	if (skb_copy_seq_read(st, off, skb_put(skb, len), len)) {
584 		XFRM_INC_STATS(dev_net(st->root_skb->dev), LINUX_MIB_XFRMINERROR);
585 		kfree_skb(skb);
586 		return NULL;
587 	}
588 	return skb;
589 }
590 
591 /**
592  * iptfs_input_save_runt() - save data in xtfs runt space.
593  * @xtfs: xtfs state
594  * @seq: the current sequence
595  * @buf: packet data
596  * @len: length of packet data
597  *
598  * Save the small (`len`) start of a fragmented packet in `buf` in the xtfs data
599  * runt space.
600  */
601 static void iptfs_input_save_runt(struct xfrm_iptfs_data *xtfs, u64 seq,
602 				  u8 *buf, int len)
603 {
604 	memcpy(xtfs->ra_runt, buf, len);
605 
606 	xtfs->ra_runtlen = len;
607 	xtfs->ra_wantseq = seq + 1;
608 }
609 
610 /**
611  * __iptfs_iphlen() - return the v4/v6 header length using packet data.
612  * @data: pointer at octet with version nibble
613  *
614  * The version data has been checked to be valid (i.e., either 4 or 6).
615  *
616  * Return: the IP header size based on the IP version.
617  */
618 static u32 __iptfs_iphlen(u8 *data)
619 {
620 	struct iphdr *iph = (struct iphdr *)data;
621 
622 	if (iph->version == 0x4)
623 		return sizeof(*iph);
624 	return sizeof(struct ipv6hdr);
625 }
626 
627 /**
628  * __iptfs_iplen() - return the v4/v6 length using packet data.
629  * @data: pointer to ip (v4/v6) packet header
630  *
631  * Grab the IPv4 or IPv6 length value in the start of the inner packet header
632  * pointed to by `data`. Assumes data len is enough for the length field only.
633  *
634  * The version data has been checked to be valid (i.e., either 4 or 6).
635  *
636  * Return: the length value.
637  */
638 static u32 __iptfs_iplen(u8 *data)
639 {
640 	struct iphdr *iph = (struct iphdr *)data;
641 
642 	if (iph->version == 0x4)
643 		return ntohs(iph->tot_len);
644 	return ntohs(((struct ipv6hdr *)iph)->payload_len) +
645 		sizeof(struct ipv6hdr);
646 }
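
/* For example, an IPv6 inner packet with payload_len == 1460 yields
 * __iptfs_iplen() == 1500 (1460 + the 40-byte IPv6 header), while for IPv4
 * the tot_len field already includes the header and is returned directly.
 */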
647 
648 /**
649  * iptfs_complete_inner_skb() - finish preparing the inner packet for gro recv.
650  * @x: xfrm state
651  * @skb: the inner packet
652  *
653  * Finish the standard xfrm processing on the inner packet prior to sending back
654  * through gro_cells_receive. We do this separately b/c we are building a list
655  * of packets in the hopes that one day a list will be taken by
656  * xfrm_input.
657  */
658 static void iptfs_complete_inner_skb(struct xfrm_state *x, struct sk_buff *skb)
659 {
660 	skb_reset_network_header(skb);
661 
662 	/* The packet is going back through gro_cells_receive(), no need
663 	 * to set this.
664 	 */
665 	skb_reset_transport_header(skb);
666 
667 	/* Packet already has checksum value set. */
668 	skb->ip_summed = CHECKSUM_NONE;
669 
670 	/* Our skb will contain the header data copied from the outer packet
671 	 * that contained the start of this inner packet. This is true both
672 	 * when we allocate a new skb and when we reuse the existing skb.
673 	 */
674 	if (ip_hdr(skb)->version == 0x4) {
675 		struct iphdr *iph = ip_hdr(skb);
676 
677 		if (x->props.flags & XFRM_STATE_DECAP_DSCP)
678 			ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, iph);
679 		if (!(x->props.flags & XFRM_STATE_NOECN))
680 			if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
681 				IP_ECN_set_ce(iph);
682 
683 		skb->protocol = htons(ETH_P_IP);
684 	} else {
685 		struct ipv6hdr *iph = ipv6_hdr(skb);
686 
687 		if (x->props.flags & XFRM_STATE_DECAP_DSCP)
688 			ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, iph);
689 		if (!(x->props.flags & XFRM_STATE_NOECN))
690 			if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
691 				IP6_ECN_set_ce(skb, iph);
692 
693 		skb->protocol = htons(ETH_P_IPV6);
694 	}
695 }
696 
697 static void __iptfs_reassem_done(struct xfrm_iptfs_data *xtfs, bool free)
698 {
699 	assert_spin_locked(&xtfs->drop_lock);
700 
701 	/* We don't care if this fails; locking takes care of things */
702 	hrtimer_try_to_cancel(&xtfs->drop_timer);
703 	if (free)
704 		kfree_skb(xtfs->ra_newskb);
705 	xtfs->ra_newskb = NULL;
706 }
707 
708 /**
709  * iptfs_reassem_abort() - In-progress packet is aborted; free the state.
710  * @xtfs: xtfs state
711  */
712 static void iptfs_reassem_abort(struct xfrm_iptfs_data *xtfs)
713 {
714 	__iptfs_reassem_done(xtfs, true);
715 }
716 
717 /**
718  * iptfs_reassem_done() - In-progress packet is complete, clear the state.
719  * @xtfs: xtfs state
720  */
721 static void iptfs_reassem_done(struct xfrm_iptfs_data *xtfs)
722 {
723 	__iptfs_reassem_done(xtfs, false);
724 }
725 
726 /**
727  * iptfs_reassem_cont() - Continue the reassembly of an inner packet.
728  * @xtfs: xtfs state
729  * @seq: sequence of current packet
730  * @st: seq read state for current packet
731  * @skb: current packet
732  * @data: offset into sequential packet data
733  * @blkoff: packet blkoff value
734  * @list: list of skbs to enqueue completed packet on
735  *
736  * Process an IPTFS payload that has a non-zero `blkoff` or when we are
737  * expecting the continuation b/c we have a runt or in-progress packet.
738  *
739  * Return: the new data offset to continue processing from.
740  */
741 static u32 iptfs_reassem_cont(struct xfrm_iptfs_data *xtfs, u64 seq,
742 			      struct skb_seq_state *st, struct sk_buff *skb,
743 			      u32 data, u32 blkoff, struct list_head *list)
744 {
745 	struct iptfs_skb_frag_walk _fragwalk;
746 	struct iptfs_skb_frag_walk *fragwalk = NULL;
747 	struct sk_buff *newskb = xtfs->ra_newskb;
748 	u32 remaining = skb->len - data;
749 	u32 runtlen = xtfs->ra_runtlen;
750 	u32 copylen, fraglen, ipremain, iphlen, iphremain, rrem;
751 
752 	/* Handle packet fragment we aren't expecting */
753 	if (!runtlen && !xtfs->ra_newskb)
754 		return data + min(blkoff, remaining);
755 
756 	/* Important to remember that input to this function is an ordered
757 	 * packet stream (unless the user disabled the reorder window). Thus if
758 	 * we are waiting for, and expecting the next packet so we can continue
759 	 * assembly, a newer sequence number indicates older ones are not coming
760 	 * (or if they do should be ignored). Technically we can receive older
761 	 * ones when the reorder window is disabled; however, the user should
762 	 * have disabled fragmentation in this case, and regardless we don't
763 	 * deal with it.
764 	 *
765 	 * blkoff could be zero if the stream is messed up (or it's an all-pad
766 	 * insertion); be careful to handle that case in each of the cases below.
767 	 */
768 
769 	/* Too old case: This can happen when the reorder window is disabled so
770 	 * ordering isn't actually guaranteed.
771 	 */
772 	if (seq < xtfs->ra_wantseq)
773 		return data + remaining;
774 
775 	/* Too new case: We missed what we wanted; clean up. */
776 	if (seq > xtfs->ra_wantseq) {
777 		XFRM_INC_STATS(xs_net(xtfs->x), LINUX_MIB_XFRMINIPTFSERROR);
778 		goto abandon;
779 	}
780 
781 	if (blkoff == 0) {
782 		if ((*skb->data & 0xF0) != 0) {
783 			XFRM_INC_STATS(xs_net(xtfs->x),
784 				       LINUX_MIB_XFRMINIPTFSERROR);
785 			goto abandon;
786 		}
787 		/* Handle all pad case, advance expected sequence number.
788 		 * (RFC 9347 S2.2.3)
789 		 */
790 		xtfs->ra_wantseq++;
791 		/* will end parsing */
792 		return data + remaining;
793 	}
794 
795 	if (runtlen) {
796 		/* Regardless of what happens we're done with the runt */
797 		xtfs->ra_runtlen = 0;
798 
799 		/* The start of this inner packet was at the very end of the last
800 		 * iptfs payload which didn't include enough for the ip header
801 		 * length field. We must have *at least* that now.
802 		 */
803 		rrem = sizeof(xtfs->ra_runt) - runtlen;
804 		if (remaining < rrem || blkoff < rrem) {
805 			XFRM_INC_STATS(xs_net(xtfs->x),
806 				       LINUX_MIB_XFRMINIPTFSERROR);
807 			goto abandon;
808 		}
809 
810 		/* fill in the runt data */
811 		if (skb_copy_seq_read(st, data, &xtfs->ra_runt[runtlen],
812 				      rrem)) {
813 			XFRM_INC_STATS(xs_net(xtfs->x),
814 				       LINUX_MIB_XFRMINBUFFERERROR);
815 			goto abandon;
816 		}
817 
818 		/* We have enough data to get the ip length value now,
819 		 * allocate an in progress skb
820 		 */
821 		ipremain = __iptfs_iplen(xtfs->ra_runt);
822 		if (ipremain < sizeof(xtfs->ra_runt)) {
823 			/* length has to be at least runtsize large */
824 			XFRM_INC_STATS(xs_net(xtfs->x),
825 				       LINUX_MIB_XFRMINIPTFSERROR);
826 			goto abandon;
827 		}
828 
829 		/* For the runt case we don't attempt sharing currently. NOTE:
830 		 * Currently, this IPTFS implementation will not create runts.
831 		 */
832 
833 		newskb = iptfs_alloc_skb(skb, ipremain, false);
834 		if (!newskb) {
835 			XFRM_INC_STATS(xs_net(xtfs->x), LINUX_MIB_XFRMINERROR);
836 			goto abandon;
837 		}
838 		xtfs->ra_newskb = newskb;
839 
840 		/* Copy the runt data into the buffer, but leave data
841 		 * pointers the same as normal non-runt case. The extra `rrem`
842 		 * recopied bytes are basically cacheline free. Allows using
843 		 * same logic below to complete.
844 		 */
845 		memcpy(skb_put(newskb, runtlen), xtfs->ra_runt,
846 		       sizeof(xtfs->ra_runt));
847 	}
848 
849 	/* Continue reassembling the packet */
850 	ipremain = __iptfs_iplen(newskb->data);
851 	iphlen = __iptfs_iphlen(newskb->data);
852 
853 	ipremain -= newskb->len;
854 	if (blkoff < ipremain) {
855 		/* Corrupt data, we don't have enough to complete the packet */
856 		XFRM_INC_STATS(xs_net(xtfs->x), LINUX_MIB_XFRMINIPTFSERROR);
857 		goto abandon;
858 	}
859 
860 	/* We want the IP header in linear space */
861 	if (newskb->len < iphlen) {
862 		iphremain = iphlen - newskb->len;
863 		if (blkoff < iphremain) {
864 			XFRM_INC_STATS(xs_net(xtfs->x),
865 				       LINUX_MIB_XFRMINIPTFSERROR);
866 			goto abandon;
867 		}
868 		fraglen = min(blkoff, remaining);
869 		copylen = min(fraglen, iphremain);
870 		if (skb_copy_seq_read(st, data, skb_put(newskb, copylen),
871 				      copylen)) {
872 			XFRM_INC_STATS(xs_net(xtfs->x),
873 				       LINUX_MIB_XFRMINBUFFERERROR);
874 			goto abandon;
875 		}
876 		/* this is a silly condition that might occur anyway */
877 		if (copylen < iphremain) {
878 			xtfs->ra_wantseq++;
879 			return data + fraglen;
880 		}
881 		/* update data and things derived from it */
882 		data += copylen;
883 		blkoff -= copylen;
884 		remaining -= copylen;
885 		ipremain -= copylen;
886 	}
887 
888 	fraglen = min(blkoff, remaining);
889 	copylen = min(fraglen, ipremain);
890 
891 	/* If we may have the opportunity to share prepare a fragwalk. */
892 	if (!skb_has_frag_list(skb) && !skb_has_frag_list(newskb) &&
893 	    (skb->head_frag || skb->len == skb->data_len) &&
894 	    skb->pp_recycle == newskb->pp_recycle) {
895 		fragwalk = &_fragwalk;
896 		iptfs_skb_prepare_frag_walk(skb, data, fragwalk);
897 	}
898 
899 	/* Try share then copy. */
900 	if (fragwalk &&
901 	    iptfs_skb_can_add_frags(newskb, fragwalk, data, copylen)) {
902 		iptfs_skb_add_frags(newskb, fragwalk, data, copylen);
903 	} else {
904 		if (skb_linearize(newskb)) {
905 			XFRM_INC_STATS(xs_net(xtfs->x),
906 				       LINUX_MIB_XFRMINBUFFERERROR);
907 			goto abandon;
908 		}
909 
910 		/* copy fragment data into newskb */
911 		if (skb_copy_seq_read(st, data, skb_put(newskb, copylen),
912 				      copylen)) {
913 			XFRM_INC_STATS(xs_net(xtfs->x),
914 				       LINUX_MIB_XFRMINBUFFERERROR);
915 			goto abandon;
916 		}
917 	}
918 
919 	if (copylen < ipremain) {
920 		xtfs->ra_wantseq++;
921 	} else {
922 		/* We are done with packet reassembly! */
923 		iptfs_reassem_done(xtfs);
924 		iptfs_complete_inner_skb(xtfs->x, newskb);
925 		list_add_tail(&newskb->list, list);
926 	}
927 
928 	/* will continue on to new data block or end */
929 	return data + fraglen;
930 
931 abandon:
932 	if (xtfs->ra_newskb) {
933 		iptfs_reassem_abort(xtfs);
934 	} else {
935 		xtfs->ra_runtlen = 0;
936 		xtfs->ra_wantseq = 0;
937 	}
938 	/* skip past fragment, maybe to end */
939 	return data + min(blkoff, remaining);
940 }
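
/* A worked example of the above (illustrative): if the previous payload ended
 * with only the first 100 bytes of a 1400-byte inner packet, reassembly is in
 * progress (ra_newskb->len == 100) and the next in-order payload should
 * arrive with blkoff == 1300. The continuation bytes are copied (or
 * frag-shared) into ra_newskb, the completed packet is queued on @list, and
 * the returned offset points at the first new data block.
 */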
941 
942 static bool __input_process_payload(struct xfrm_state *x, u32 data,
943 				    struct skb_seq_state *skbseq,
944 				    struct list_head *sublist)
945 {
946 	u8 hbytes[sizeof(struct ipv6hdr)];
947 	struct iptfs_skb_frag_walk _fragwalk;
948 	struct iptfs_skb_frag_walk *fragwalk = NULL;
949 	struct sk_buff *defer, *first_skb, *next, *skb;
950 	const unsigned char *old_mac;
951 	struct xfrm_iptfs_data *xtfs;
952 	struct iphdr *iph;
953 	struct net *net;
954 	u32 first_iplen, iphlen, iplen, remaining, tail;
955 	u32 capturelen;
956 	u64 seq;
957 
958 	xtfs = x->mode_data;
959 	net = xs_net(x);
960 	skb = skbseq->root_skb;
961 	first_skb = NULL;
962 	defer = NULL;
963 
964 	seq = __esp_seq(skb);
965 
966 	/* Save the old mac header if set */
967 	old_mac = skb_mac_header_was_set(skb) ? skb_mac_header(skb) : NULL;
968 
969 	/* New packets */
970 
971 	tail = skb->len;
972 	while (data < tail) {
973 		__be16 protocol = 0;
974 
975 		/* Gather information on the next data block.
976 		 * `data` points to the start of the data block.
977 		 */
978 		remaining = tail - data;
979 
980 		/* try and copy enough bytes to read length from ipv4/ipv6 */
981 		iphlen = min_t(u32, remaining, 6);
982 		if (skb_copy_seq_read(skbseq, data, hbytes, iphlen)) {
983 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
984 			goto done;
985 		}
986 
987 		iph = (struct iphdr *)hbytes;
988 		if (iph->version == 0x4) {
989 			/* must have at least tot_len field present */
990 			if (remaining < 4) {
991 				/* save the bytes we have, advance data and exit */
992 				iptfs_input_save_runt(xtfs, seq, hbytes,
993 						      remaining);
994 				data += remaining;
995 				break;
996 			}
997 
998 			iplen = be16_to_cpu(iph->tot_len);
999 			iphlen = iph->ihl << 2;
1000 			if (iplen < iphlen || iphlen < sizeof(*iph)) {
1001 				XFRM_INC_STATS(net,
1002 					       LINUX_MIB_XFRMINHDRERROR);
1003 				goto done;
1004 			}
1005 			protocol = cpu_to_be16(ETH_P_IP);
1006 			XFRM_MODE_SKB_CB(skbseq->root_skb)->tos = iph->tos;
1007 		} else if (iph->version == 0x6) {
1008 			/* must have at least payload_len field present */
1009 			if (remaining < 6) {
1010 				/* save the bytes we have, advance data and exit */
1011 				iptfs_input_save_runt(xtfs, seq, hbytes,
1012 						      remaining);
1013 				data += remaining;
1014 				break;
1015 			}
1016 
1017 			iplen = be16_to_cpu(((struct ipv6hdr *)hbytes)->payload_len);
1018 			iplen += sizeof(struct ipv6hdr);
1019 			iphlen = sizeof(struct ipv6hdr);
1020 			protocol = cpu_to_be16(ETH_P_IPV6);
1021 			XFRM_MODE_SKB_CB(skbseq->root_skb)->tos =
1022 				ipv6_get_dsfield((struct ipv6hdr *)iph);
1023 		} else if (iph->version == 0x0) {
1024 			/* pad */
1025 			data = tail;
1026 			break;
1027 		} else {
1028 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
1029 			goto done;
1030 		}
1031 
1032 		if (unlikely(skbseq->stepped_offset)) {
1033 			/* We need to reset our seq read, it can't back up at
1034 			 * this point.
1035 			 */
1036 			struct sk_buff *save = skbseq->root_skb;
1037 
1038 			skb_abort_seq_read(skbseq);
1039 			skb_prepare_seq_read(save, data, tail, skbseq);
1040 		}
1041 
1042 		if (first_skb) {
1043 			skb = NULL;
1044 		} else {
1045 			first_skb = skb;
1046 			first_iplen = iplen;
1047 			fragwalk = NULL;
1048 
1049 			/* We are going to skip over `data` bytes to reach the
1050 			 * start of the `iphlen`-length IP header of the
1051 			 * `iplen`-byte inner packet.
1052 			 */
1053 
1054 			if (skb_has_frag_list(skb)) {
1055 				defer = skb;
1056 				skb = NULL;
1057 			} else if (data + iphlen <= skb_headlen(skb) &&
1058 				   /* make sure our header is 32-bit aligned? */
1059 				   /* ((uintptr_t)(skb->data + data) & 0x3) == 0 && */
1060 				   skb_tailroom(skb) + tail - data >= iplen) {
1061 				/* Reuse the received skb.
1062 				 *
1063 				 * We have enough headlen to pull past any
1064 				 * initial fragment data, leaving at least the
1065 				 * IP header in the linear buffer space.
1066 				 *
1067 				 * For linear buffer space we only require that
1068 				 * linear buffer space is large enough to
1069 				 * eventually hold the entire reassembled
1070 				 * packet (by including tailroom in the check).
1071 				 *
1072 				 * For non-linear tailroom is 0 and so we only
1073 				 * re-use if the entire packet is present
1074 				 * already.
1075 				 *
1076 				 * NOTE: there are many more options for
1077 				 * sharing, KISS for now. Also, this can produce
1078 				 * skb's with the IP header unaligned to 32
1079 				 * bits. If that ends up being a problem then a
1080 				 * check should be added to the conditional
1081 				 * above that the header lies on a 32-bit
1082 				 * boundary as well.
1083 				 */
1084 				skb_pull(skb, data);
1085 
1086 				/* our range just changed */
1087 				data = 0;
1088 				tail = skb->len;
1089 				remaining = skb->len;
1090 
1091 				skb->protocol = protocol;
1092 				skb_mac_header_rebuild(skb);
1093 				if (skb->mac_len)
1094 					eth_hdr(skb)->h_proto = skb->protocol;
1095 
1096 				/* all pointers could be changed now reset walk */
1097 				skb_abort_seq_read(skbseq);
1098 				skb_prepare_seq_read(skb, data, tail, skbseq);
1099 			} else if (skb->head_frag &&
1100 				   /* We have the IP header right now */
1101 				   remaining >= iphlen) {
1102 				fragwalk = &_fragwalk;
1103 				iptfs_skb_prepare_frag_walk(skb, data, fragwalk);
1104 				defer = skb;
1105 				skb = NULL;
1106 			} else {
1107 				/* We couldn't reuse the input skb so allocate a
1108 				 * new one.
1109 				 */
1110 				defer = skb;
1111 				skb = NULL;
1112 			}
1113 
1114 			/* Don't trim `first_skb` until the end as we are
1115 			 * walking that data now.
1116 			 */
1117 		}
1118 
1119 		capturelen = min(iplen, remaining);
1120 		if (!skb) {
1121 			if (!fragwalk ||
1122 			    /* Large enough to be worth sharing */
1123 			    iplen < IPTFS_PKT_SHARE_MIN ||
1124 			    /* Have IP header + some data to share. */
1125 			    capturelen <= iphlen ||
1126 			    /* Try creating skb and adding frags */
1127 			    !(skb = iptfs_pskb_add_frags(first_skb, fragwalk,
1128 							 data, capturelen,
1129 							 skbseq, iphlen))) {
1130 				skb = iptfs_pskb_extract_seq(iplen, skbseq, data, capturelen);
1131 			}
1132 			if (!skb) {
1133 				/* skip to next packet or done */
1134 				data += capturelen;
1135 				continue;
1136 			}
1137 
1138 			skb->protocol = protocol;
1139 			if (old_mac) {
1140 				/* rebuild the mac header */
1141 				skb_set_mac_header(skb, -first_skb->mac_len);
1142 				memcpy(skb_mac_header(skb), old_mac, first_skb->mac_len);
1143 				eth_hdr(skb)->h_proto = skb->protocol;
1144 			}
1145 		}
1146 
1147 		data += capturelen;
1148 
1149 		if (skb->len < iplen) {
1150 			/* Start reassembly */
1151 			spin_lock(&xtfs->drop_lock);
1152 
1153 			xtfs->ra_newskb = skb;
1154 			xtfs->ra_wantseq = seq + 1;
1155 			if (!hrtimer_is_queued(&xtfs->drop_timer)) {
1156 				/* softirq blocked lest the timer fire and interrupt us */
1157 				hrtimer_start(&xtfs->drop_timer,
1158 					      xtfs->drop_time_ns,
1159 					      IPTFS_HRTIMER_MODE);
1160 			}
1161 
1162 			spin_unlock(&xtfs->drop_lock);
1163 
1164 			break;
1165 		}
1166 
1167 		iptfs_complete_inner_skb(x, skb);
1168 		list_add_tail(&skb->list, sublist);
1169 	}
1170 
1171 	if (data != tail)
1172 		/* this should not happen from the above code */
1173 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINIPTFSERROR);
1174 
1175 	if (first_skb && first_iplen && !defer && first_skb != xtfs->ra_newskb) {
1176 		/* first_skb is queued b/c !defer and not partial */
1177 		if (pskb_trim(first_skb, first_iplen)) {
1178 			/* error trimming */
1179 			list_del(&first_skb->list);
1180 			defer = first_skb;
1181 		}
1182 		first_skb->ip_summed = CHECKSUM_NONE;
1183 	}
1184 
1185 	/* Send the packets! */
1186 	list_for_each_entry_safe(skb, next, sublist, list) {
1187 		skb_list_del_init(skb);
1188 		if (xfrm_input(skb, 0, 0, -2))
1189 			kfree_skb(skb);
1190 	}
1191 done:
1192 	skb = skbseq->root_skb;
1193 	skb_abort_seq_read(skbseq);
1194 
1195 	if (defer) {
1196 		consume_skb(defer);
1197 	} else if (!first_skb) {
1198 		/* skb is the original passed-in skb, but we didn't get far
1199 		 * enough to process it as the first_skb; if we had, it would
1200 		 * either be saved in ra_newskb, trimmed and sent on as an skb,
1201 		 * or placed in defer to be freed.
1202 		 */
1203 		kfree_skb(skb);
1204 	}
1205 	return true;
1206 }
1207 
1208 /**
1209  * iptfs_input_ordered() - handle next in order IPTFS payload.
1210  * @x: xfrm state
1211  * @skb: current packet
1212  *
1213  * Process the IPTFS payload in `skb` and consume it afterwards.
1214  */
1215 static void iptfs_input_ordered(struct xfrm_state *x, struct sk_buff *skb)
1216 {
1217 	struct ip_iptfs_cc_hdr iptcch;
1218 	struct skb_seq_state skbseq;
1219 	struct list_head sublist; /* rename this, it's just a list */
1220 	struct xfrm_iptfs_data *xtfs;
1221 	struct ip_iptfs_hdr *ipth;
1222 	struct net *net;
1223 	u32 blkoff, data, remaining;
1224 	bool consumed = false;
1225 	u64 seq;
1226 
1227 	xtfs = x->mode_data;
1228 	net = xs_net(x);
1229 
1230 	seq = __esp_seq(skb);
1231 
1232 	/* Large enough to hold both types of header */
1233 	ipth = (struct ip_iptfs_hdr *)&iptcch;
1234 
1235 	skb_prepare_seq_read(skb, 0, skb->len, &skbseq);
1236 
1237 	/* Get the IPTFS header and validate it */
1238 
1239 	if (skb_copy_seq_read(&skbseq, 0, ipth, sizeof(*ipth))) {
1240 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
1241 		goto done;
1242 	}
1243 	data = sizeof(*ipth);
1244 
1245 	trace_iptfs_egress_recv(skb, xtfs, be16_to_cpu(ipth->block_offset));
1246 
1247 	/* Set data past the basic header */
1248 	if (ipth->subtype == IPTFS_SUBTYPE_CC) {
1249 		/* Copy the rest of the CC header */
1250 		remaining = sizeof(iptcch) - sizeof(*ipth);
1251 		if (skb_copy_seq_read(&skbseq, data, ipth + 1, remaining)) {
1252 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
1253 			goto done;
1254 		}
1255 		data += remaining;
1256 	} else if (ipth->subtype != IPTFS_SUBTYPE_BASIC) {
1257 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
1258 		goto done;
1259 	}
1260 
1261 	if (ipth->flags != 0) {
1262 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
1263 		goto done;
1264 	}
1265 
1266 	INIT_LIST_HEAD(&sublist);
1267 
1268 	/* Handle fragment at start of payload, and/or waiting reassembly. */
1269 
1270 	blkoff = ntohs(ipth->block_offset);
1271 	/* racy check before locking; re-checked under the lock below */
1272 	if (blkoff || xtfs->ra_runtlen || xtfs->ra_newskb) {
1273 		spin_lock(&xtfs->drop_lock);
1274 
1275 		/* check again after lock */
1276 		if (blkoff || xtfs->ra_runtlen || xtfs->ra_newskb) {
1277 			data = iptfs_reassem_cont(xtfs, seq, &skbseq, skb, data,
1278 						  blkoff, &sublist);
1279 		}
1280 
1281 		spin_unlock(&xtfs->drop_lock);
1282 	}
1283 
1284 	/* New packets */
1285 	consumed = __input_process_payload(x, data, &skbseq, &sublist);
1286 done:
1287 	if (!consumed) {
1288 		skb = skbseq.root_skb;
1289 		skb_abort_seq_read(&skbseq);
1290 		kfree_skb(skb);
1291 	}
1292 }
1293 
1294 /* ------------------------------- */
1295 /* Input (Egress) Re-ordering Code */
1296 /* ------------------------------- */
1297 
1298 static void __vec_shift(struct xfrm_iptfs_data *xtfs, u32 shift)
1299 {
1300 	u32 savedlen = xtfs->w_savedlen;
1301 
1302 	if (shift > savedlen)
1303 		shift = savedlen;
1304 	if (shift != savedlen)
1305 		memcpy(xtfs->w_saved, xtfs->w_saved + shift,
1306 		       (savedlen - shift) * sizeof(*xtfs->w_saved));
1307 	memset(xtfs->w_saved + savedlen - shift, 0,
1308 	       shift * sizeof(*xtfs->w_saved));
1309 	xtfs->w_savedlen -= shift;
1310 }
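
/* For example, with w_savedlen == 3, __vec_shift(xtfs, 2) moves old slot 2
 * into slot 0, zeroes the two vacated tail slots, and leaves w_savedlen == 1.
 */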
1311 
1312 static void __reorder_past(struct xfrm_iptfs_data *xtfs, struct sk_buff *inskb,
1313 			   struct list_head *freelist)
1314 {
1315 	list_add_tail(&inskb->list, freelist);
1316 }
1317 
1318 static u32 __reorder_drop(struct xfrm_iptfs_data *xtfs, struct list_head *list)
1319 
1320 {
1321 	struct skb_wseq *s, *se;
1322 	const u32 savedlen = xtfs->w_savedlen;
1323 	time64_t now = ktime_get_raw_fast_ns();
1324 	u32 count = 0;
1325 	u32 scount = 0;
1326 
1327 	if (xtfs->w_saved[0].drop_time > now)
1328 		goto set_timer;
1329 
1330 	++xtfs->w_wantseq;
1331 
1332 	/* Keep flushing packets until we reach a drop time greater than now. */
1333 	s = xtfs->w_saved;
1334 	se = s + savedlen;
1335 	do {
1336 		/* Walking past empty slots until we reach a packet */
1337 		for (; s < se && !s->skb; s++) {
1338 			if (s->drop_time > now)
1339 				goto outerdone;
1340 		}
1341 		/* Sending packets until we hit another empty slot. */
1342 		for (; s < se && s->skb; scount++, s++)
1343 			list_add_tail(&s->skb->list, list);
1344 	} while (s < se);
1345 outerdone:
1346 
1347 	count = s - xtfs->w_saved;
1348 	if (count) {
1349 		xtfs->w_wantseq += count;
1350 
1351 		/* Shift handled slots plus final empty slot into slot 0. */
1352 		__vec_shift(xtfs, count);
1353 	}
1354 
1355 	if (xtfs->w_savedlen) {
1356 set_timer:
1357 		/* Drifting is OK */
1358 		hrtimer_start(&xtfs->drop_timer,
1359 			      xtfs->w_saved[0].drop_time - now,
1360 			      IPTFS_HRTIMER_MODE);
1361 	}
1362 	return scount;
1363 }
1364 
1365 static void __reorder_this(struct xfrm_iptfs_data *xtfs, struct sk_buff *inskb,
1366 			   struct list_head *list)
1367 {
1368 	struct skb_wseq *s, *se;
1369 	const u32 savedlen = xtfs->w_savedlen;
1370 	u32 count = 0;
1371 
1372 	/* Got what we wanted. */
1373 	list_add_tail(&inskb->list, list);
1374 	++xtfs->w_wantseq;
1375 	if (!savedlen)
1376 		return;
1377 
1378 	/* Flush remaining consecutive packets. */
1379 
1380 	/* Keep sending until we hit another missed pkt. */
1381 	for (s = xtfs->w_saved, se = s + savedlen; s < se && s->skb; s++)
1382 		list_add_tail(&s->skb->list, list);
1383 	count = s - xtfs->w_saved;
1384 	if (count)
1385 		xtfs->w_wantseq += count;
1386 
1387 	/* Shift handled slots plus final empty slot into slot 0. */
1388 	__vec_shift(xtfs, count + 1);
1389 }
1390 
1391 /* Set the slot's drop time and all the empty slots below it until reaching a
1392  * filled slot which will already be set.
1393  */
1394 static void iptfs_set_window_drop_times(struct xfrm_iptfs_data *xtfs, int index)
1395 {
1396 	const u32 savedlen = xtfs->w_savedlen;
1397 	struct skb_wseq *s = xtfs->w_saved;
1398 	time64_t drop_time;
1399 
1400 	assert_spin_locked(&xtfs->drop_lock);
1401 
1402 	if (savedlen > index + 1) {
1403 		/* we are below another, our drop time and the timer are already set */
1404 		return;
1405 	}
1406 	/* we are the furthest in the future so get a new drop time. */
1407 	drop_time = ktime_get_raw_fast_ns();
1408 	drop_time += xtfs->drop_time_ns;
1409 
1410 	/* Walk back through the array setting drop times as we go */
1411 	s[index].drop_time = drop_time;
1412 	while (index-- > 0 && !s[index].skb)
1413 		s[index].drop_time = drop_time;
1414 
1415 	/* If we walked all the way back, schedule the drop timer if needed */
1416 	if (index == -1 && !hrtimer_is_queued(&xtfs->drop_timer))
1417 		hrtimer_start(&xtfs->drop_timer, xtfs->drop_time_ns,
1418 			      IPTFS_HRTIMER_MODE);
1419 }
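
/* For example, if slots 0..2 are empty and a packet is saved into slot 3,
 * the walk above propagates slot 3's new drop time down through slots 2, 1,
 * and 0, reaches index -1, and so also starts the drop timer if it isn't
 * already queued.
 */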
1420 
1421 static void __reorder_future_fits(struct xfrm_iptfs_data *xtfs,
1422 				  struct sk_buff *inskb,
1423 				  struct list_head *freelist)
1424 {
1425 	const u64 inseq = __esp_seq(inskb);
1426 	const u64 wantseq = xtfs->w_wantseq;
1427 	const u64 distance = inseq - wantseq;
1428 	const u32 savedlen = xtfs->w_savedlen;
1429 	const u32 index = distance - 1;
1430 
1431 	/* Handle future sequence number received which fits in the window.
1432 	 *
1433 	 * We know we don't have the seq we want so we won't be able to flush
1434 	 * anything.
1435 	 */
1436 
1437 	/* slot count is 4, saved size is 3, savedlen is 2
1438 	 *
1439 	 * "window boundary" is based on the fixed window size
1440 	 * distance is also slot number
1441 	 * index is an array index (i.e., slot - 1)
1442 	 * : : - implicit NULL after array len
1443 	 *
1444 	 *          +--------- used length (savedlen == 2)
1445 	 *          |   +----- array size (nslots - 1 == 3)
1446 	 *          |   |   + window boundary (nslots == 4)
1447 	 *          V   V | V
1448 	 *                |
1449 	 *  0   1   2   3 |   slot number
1450 	 * ---  0   1   2 |   array index
1451 	 *     [-] [b] : :|   array
1452 	 *
1453 	 * "2" "3" "4" *5*|   seq numbers
1454 	 *
1455 	 * We receive seq number 5
1456 	 * distance == 3 [inseq(5) - w_wantseq(2)]
1457 	 * index == 2 [distance(3) - 1]
1458 	 */
1459 
1460 	if (xtfs->w_saved[index].skb) {
1461 		/* a dup of a future */
1462 		list_add_tail(&inskb->list, freelist);
1463 		return;
1464 	}
1465 
1466 	xtfs->w_saved[index].skb = inskb;
1467 	xtfs->w_savedlen = max(savedlen, index + 1);
1468 	iptfs_set_window_drop_times(xtfs, index);
1469 }
1470 
1471 static void __reorder_future_shifts(struct xfrm_iptfs_data *xtfs,
1472 				    struct sk_buff *inskb,
1473 				    struct list_head *list)
1474 {
1475 	const u32 nslots = xtfs->cfg.reorder_win_size + 1;
1476 	const u64 inseq = __esp_seq(inskb);
1477 	u32 savedlen = xtfs->w_savedlen;
1478 	u64 wantseq = xtfs->w_wantseq;
1479 	struct skb_wseq *wnext;
1480 	struct sk_buff *slot0;
1481 	u32 beyond, shifting, slot;
1482 	u64 distance;
1483 
1484 	/* Handle future sequence number received.
1485 	 *
1486 	 * IMPORTANT: we are at least advancing w_wantseq (i.e., wantseq) by 1
1487 	 * b/c we are beyond the window boundary.
1488 	 *
1489 	 * We know we don't have the wantseq so that counts as a drop.
1490 	 */
1491 
1492 	/* example: slot count is 4, array size is 3, savedlen is 2, slot 0 is
1493 	 * the missing sequence number.
1494 	 *
1495 	 * the final slot at savedlen (index savedlen - 1) is always occupied.
1496 	 *
1497 	 * beyond is "beyond array size" not savedlen.
1498 	 *
1499 	 *          +--------- array length (savedlen == 2)
1500 	 *          |   +----- array size (nslots - 1 == 3)
1501 	 *          |   | +- window boundary (nslots == 4)
1502 	 *          V   V |
1503 	 *                |
1504 	 *  0   1   2   3 |   slot number
1505 	 * ---  0   1   2 |   array index
1506 	 *     [b] [c] : :|   array
1507 	 *                |
1508 	 * "2" "3" "4" "5"|*6*  seq numbers
1509 	 *
1510 	 * We receive seq number 6
1511 	 * distance == 4 [inseq(6) - w_wantseq(2)]
1512 	 * newslot == distance
1513 	 * index == 3 [distance(4) - 1]
1514 	 * beyond == 1 [newslot(4) - lastslot((nslots(4) - 1))]
1515 	 * shifting == 1 [min(savedlen(2), beyond(1))]
1516 	 * slot0_skb == [b], and should match w_wantseq
1517 	 *
1518 	 *                +--- window boundary (nslots == 4)
1519 	 *  0   1   2   3 | 4   slot number
1520 	 * ---  0   1   2 | 3   array index
1521 	 *     [b] : : : :|     array
1522 	 * "2" "3" "4" "5" *6*  seq numbers
1523 	 *
1524 	 * We receive seq number 6
1525 	 * distance == 4 [inseq(6) - w_wantseq(2)]
1526 	 * newslot == distance
1527 	 * index == 3 [distance(4) - 1]
1528 	 * beyond == 1 [newslot(4) - lastslot((nslots(4) - 1))]
1529 	 * shifting == 1 [min(savedlen(1), beyond(1))]
1530 	 * slot0_skb == [b] and should match w_wantseq
1531 	 *
1532 	 *                +-- window boundary (nslots == 4)
1533 	 *  0   1   2   3 | 4   5   6   slot number
1534 	 * ---  0   1   2 | 3   4   5   array index
1535 	 *     [-] [c] : :|             array
1536 	 * "2" "3" "4" "5" "6" "7" *8*  seq numbers
1537 	 *
1538 	 * savedlen = 2, beyond = 3
1539 	 * iter 1: slot0 == NULL, missed++, lastdrop = 2 (2+1-1), slot0 = [-]
1540 	 * iter 2: slot0 == NULL, missed++, lastdrop = 3 (2+2-1), slot0 = [c]
1541 	 * 2 < 3, extra = 1 (3-2), missed += extra, lastdrop = 4 (2+2+1-1)
1542 	 *
1543 	 * We receive seq number 8
1544 	 * distance == 6 [inseq(8) - w_wantseq(2)]
1545 	 * newslot == distance
1546 	 * index == 5 [distance(6) - 1]
1547 	 * beyond == 3 [newslot(6) - lastslot((nslots(4) - 1))]
1548 	 * shifting == 2 [min(savedlen(2), beyond(3))]
1549 	 *
1550 	 * slot0_skb == NULL changed from [b] when "savedlen < beyond" is true.
1551 	 */
1552 
1553 	/* Now send any packets that are being shifted out of saved, and account
1554 	 * for missing packets that are exiting the window as we shift it.
1555 	 */
1556 
1557 	distance = inseq - wantseq;
1558 	beyond = distance - (nslots - 1);
1559 
1560 	/* If savedlen > beyond we are shifting some, else all. */
1561 	shifting = min(savedlen, beyond);
1562 
1563 	/* slot0 tracks the buf that has just shifted down into slot 0 */
1564 	slot0 = NULL;
1565 	wnext = xtfs->w_saved;
1566 	for (slot = 1; slot <= shifting; slot++, wnext++) {
1567 		/* handle what was in slot0 before we occupy it */
1568 		if (slot0)
1569 			list_add_tail(&slot0->list, list);
1570 		slot0 = wnext->skb;
1571 		wnext->skb = NULL;
1572 	}
1573 
1574 	/* slot0 is now either NULL (in which case it's what we are now waiting
1575 	 * for) or a buf (in which case we need to handle it like we received
1576 	 * it); however, we may be advancing past that buffer as well.
1577 	 */
1578 
1579 	/* Handle case where we need to shift more than we had saved, slot0 will
1580 	 * be NULL iff savedlen is 0, otherwise slot0 will always be
1581 	 * non-NULL b/c we shifted the final element, which is always set if
1582 	 * there is any saved, into slot0.
1583 	 */
1584 	if (savedlen < beyond) {
1585 		if (savedlen != 0)
1586 			list_add_tail(&slot0->list, list);
1587 		slot0 = NULL;
1588 		/* slot0 has had an empty slot pushed into it */
1589 	}
1590 
1591 	/* Remove the entries */
1592 	__vec_shift(xtfs, beyond);
1593 
1594 	/* Advance want seq */
1595 	xtfs->w_wantseq += beyond;
1596 
1597 	/* Process drops here when implementing congestion control */
1598 
1599 	/* We've shifted. Plug the packet in at the end. */
1600 	xtfs->w_savedlen = nslots - 1;
1601 	xtfs->w_saved[xtfs->w_savedlen - 1].skb = inskb;
1602 	iptfs_set_window_drop_times(xtfs, xtfs->w_savedlen - 1);
1603 
1604 	/* if we don't have a slot0 then we must wait for it */
1605 	if (!slot0)
1606 		return;
1607 
1608 	/* If slot0, seq must match new want seq */
1609 
1610 	/* slot0 is valid, treat like we received expected. */
1611 	__reorder_this(xtfs, slot0, list);
1612 }
1613 
1614 /* Receive a new packet into the reorder window. Ordered packets are
1615  * appended to @list; duplicate and too-old packets go on @freelist.
1616  */
1617 static void iptfs_input_reorder(struct xfrm_iptfs_data *xtfs,
1618 				struct sk_buff *inskb, struct list_head *list,
1619 				struct list_head *freelist)
1620 {
1621 	const u32 nslots = xtfs->cfg.reorder_win_size + 1;
1622 	u64 inseq = __esp_seq(inskb);
1623 	u64 wantseq;
1624 
1625 	assert_spin_locked(&xtfs->drop_lock);
1626 
1627 	if (unlikely(!xtfs->w_seq_set)) {
1628 		xtfs->w_seq_set = true;
1629 		xtfs->w_wantseq = inseq;
1630 	}
1631 	wantseq = xtfs->w_wantseq;
1632 
1633 	if (likely(inseq == wantseq))
1634 		__reorder_this(xtfs, inskb, list);
1635 	else if (inseq < wantseq)
1636 		__reorder_past(xtfs, inskb, freelist);
1637 	else if ((inseq - wantseq) < nslots)
1638 		__reorder_future_fits(xtfs, inskb, freelist);
1639 	else
1640 		__reorder_future_shifts(xtfs, inskb, list);
1641 }
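
/* A worked example of the dispatch above (illustrative), with
 * reorder_win_size == 3 (nslots == 4) and w_wantseq == 2: seq 2 is delivered
 * immediately; seq 4 is saved in the window; seq 3 is delivered and flushes
 * the saved seq 4 (w_wantseq becomes 5); a late seq 1 goes on @freelist; and
 * seq 9 (distance 4, not < nslots) takes the __reorder_future_shifts() path.
 */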
1642 
1643 /**
1644  * iptfs_drop_timer() - Handle drop timer expiry.
1645  * @me: the timer
1646  *
1647  * This is similar to our input function.
1648  *
1649  * The drop timer is set when we start an in progress reassembly, and also when
1650  * we save a future packet in the window saved array.
1651  *
1652  * NOTE: packets in the saved window always have newer drop times the
1653  * further they are in the future, i.e., for:
1654  *
1655  *    if slots (S0, S1, ... Sn) and `Dn` is the drop time for slot `Sn`,
1656  *    then D(n-1) <= D(n).
1657  *
1658  * So, regardless of why the timer is firing, we can always discard any
1659  * in-progress fragment; either it's the reassembly timer, or slot 0 is going
1660  * to be dropped as S0 must have the earliest drop time, and slot 0 holds the
1661  * continuation fragment of the in-progress packet.
1662  *
1663  * Return: HRTIMER_NORESTART.
1664  */
1665 static enum hrtimer_restart iptfs_drop_timer(struct hrtimer *me)
1666 {
1667 	struct sk_buff *skb, *next;
1668 	struct list_head list;
1669 	struct xfrm_iptfs_data *xtfs;
1670 	struct xfrm_state *x;
1671 	u32 count;
1672 
1673 	xtfs = container_of(me, typeof(*xtfs), drop_timer);
1674 	x = xtfs->x;
1675 
1676 	INIT_LIST_HEAD(&list);
1677 
1678 	spin_lock(&xtfs->drop_lock);
1679 
1680 	/* Drop any in progress packet */
1681 	skb = xtfs->ra_newskb;
1682 	xtfs->ra_newskb = NULL;
1683 
1684 	/* Now drop as many packets as we should from the reordering window
1685 	 * saved array
1686 	 */
1687 	count = xtfs->w_savedlen ? __reorder_drop(xtfs, &list) : 0;
1688 
1689 	spin_unlock(&xtfs->drop_lock);
1690 
1691 	if (skb)
1692 		kfree_skb_reason(skb, SKB_DROP_REASON_FRAG_REASM_TIMEOUT);
1693 
1694 	if (count) {
1695 		list_for_each_entry_safe(skb, next, &list, list) {
1696 			skb_list_del_init(skb);
1697 			iptfs_input_ordered(x, skb);
1698 		}
1699 	}
1700 
1701 	return HRTIMER_NORESTART;
1702 }
1703 
1704 /**
1705  * iptfs_input() - handle receipt of iptfs payload
1706  * @x: xfrm state
1707  * @skb: the packet
1708  *
 * We have an IPTFS payload; order it if needed, then process newly in-order
 * packets.
1711  *
1712  * Return: -EINPROGRESS to inform xfrm_input to stop processing the skb.
1713  */
1714 static int iptfs_input(struct xfrm_state *x, struct sk_buff *skb)
1715 {
1716 	struct list_head freelist, list;
1717 	struct xfrm_iptfs_data *xtfs = x->mode_data;
1718 	struct sk_buff *next;
1719 
1720 	/* Fast path for no reorder window. */
1721 	if (xtfs->cfg.reorder_win_size == 0) {
1722 		iptfs_input_ordered(x, skb);
1723 		goto done;
1724 	}
1725 
	/* Fetch the list of in-order packets from the reordering window as well
	 * as the list of buffers we now need to free.
1728 	 */
1729 	INIT_LIST_HEAD(&list);
1730 	INIT_LIST_HEAD(&freelist);
1731 
1732 	spin_lock(&xtfs->drop_lock);
1733 	iptfs_input_reorder(xtfs, skb, &list, &freelist);
1734 	spin_unlock(&xtfs->drop_lock);
1735 
1736 	list_for_each_entry_safe(skb, next, &list, list) {
1737 		skb_list_del_init(skb);
1738 		iptfs_input_ordered(x, skb);
1739 	}
1740 
1741 	list_for_each_entry_safe(skb, next, &freelist, list) {
1742 		skb_list_del_init(skb);
1743 		kfree_skb(skb);
1744 	}
1745 done:
	/* We have always dealt with the input SKB: either we are re-using it
	 * or we have freed it. Return -EINPROGRESS so that xfrm_input stops
1748 	 * processing it.
1749 	 */
1750 	return -EINPROGRESS;
1751 }
1752 
1753 /* ================================= */
1754 /* IPTFS Sending (ingress) Functions */
1755 /* ================================= */
1756 
1757 /* ------------------------- */
1758 /* Enqueue to send functions */
1759 /* ------------------------- */
1760 
1761 /**
1762  * iptfs_enqueue() - enqueue packet if ok to send.
1763  * @xtfs: xtfs state
1764  * @skb: the packet
1765  *
1766  * Return: true if packet enqueued.
1767  */
1768 static bool iptfs_enqueue(struct xfrm_iptfs_data *xtfs, struct sk_buff *skb)
1769 {
1770 	u64 newsz = xtfs->queue_size + skb->len;
1771 	struct iphdr *iph;
1772 
1773 	assert_spin_locked(&xtfs->x->lock);
1774 
1775 	if (newsz > xtfs->cfg.max_queue_size)
1776 		return false;
1777 
1778 	/* Set ECN CE if we are above our ECN queue threshold */
1779 	if (newsz > xtfs->ecn_queue_size) {
1780 		iph = ip_hdr(skb);
1781 		if (iph->version == 4)
1782 			IP_ECN_set_ce(iph);
1783 		else if (iph->version == 6)
1784 			IP6_ECN_set_ce(skb, ipv6_hdr(skb));
1785 	}
1786 
1787 	__skb_queue_tail(&xtfs->queue, skb);
1788 	xtfs->queue_size += skb->len;
1789 	return true;
1790 }
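
/* A worked example of the ECN threshold above, with illustrative numbers:
 * iptfs_user_init() sets ecn_queue_size to 95% of max_queue_size, so for a
 * max_queue_size of 1048576 octets the threshold is 996147 octets. Past that
 * point packets are CE-marked as they are enqueued but still accepted, until
 * max_queue_size itself is exceeded and they are dropped instead.
 */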
1791 
1792 static int iptfs_get_cur_pmtu(struct xfrm_state *x, struct xfrm_iptfs_data *xtfs,
1793 			      struct sk_buff *skb)
1794 {
1795 	struct xfrm_dst *xdst = (struct xfrm_dst *)skb_dst(skb);
1796 	u32 payload_mtu = xtfs->payload_mtu;
1797 	u32 pmtu = __iptfs_get_inner_mtu(x, xdst->child_mtu_cached);
1798 
1799 	if (payload_mtu && payload_mtu < pmtu)
1800 		pmtu = payload_mtu;
1801 
1802 	return pmtu;
1803 }
1804 
1805 static int iptfs_is_too_big(struct sock *sk, struct sk_buff *skb, u32 pmtu)
1806 {
1807 	if (skb->len <= pmtu)
1808 		return 0;
1809 
1810 	/* We only send ICMP too big if the user has configured us as
1811 	 * dont-fragment.
1812 	 */
1813 	if (skb->dev)
1814 		XFRM_INC_STATS(dev_net(skb->dev), LINUX_MIB_XFRMOUTERROR);
1815 
1816 	if (sk)
1817 		xfrm_local_error(skb, pmtu);
1818 	else if (ip_hdr(skb)->version == 4)
1819 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(pmtu));
1820 	else
1821 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, pmtu);
1822 
1823 	return 1;
1824 }
1825 
1826 /* IPv4/IPv6 packet ingress to IPTFS tunnel, arrange to send in IPTFS payload
1827  * (i.e., aggregating or fragmenting as appropriate).
1828  * This is set in dst->output for an SA.
1829  */
1830 static int iptfs_output_collect(struct net *net, struct sock *sk, struct sk_buff *skb)
1831 {
1832 	struct dst_entry *dst = skb_dst(skb);
1833 	struct xfrm_state *x = dst->xfrm;
1834 	struct xfrm_iptfs_data *xtfs = x->mode_data;
1835 	struct sk_buff *segs, *nskb;
1836 	u32 pmtu = 0;
1837 	bool ok = true;
1838 	bool was_gso;
1839 
1840 	/* We have hooked into dst_entry->output which means we have skipped the
1841 	 * protocol specific netfilter (see xfrm4_output, xfrm6_output).
	 * When our timer runs we will end up calling xfrm_output directly on
	 * the encapsulated traffic.
	 *
	 * In both cases the skipped hook is NF_INET_POST_ROUTING, which allows
	 * changing the skb->dst entry; the result may no longer be xfrm based,
	 * in which case the REROUTED flag is set and dst_output is called.
1848 	 *
1849 	 * For IPv6 we are also skipping fragmentation handling for local
1850 	 * sockets, which may or may not be good depending on our tunnel DF
1851 	 * setting. Normally with fragmentation supported we want to skip this
1852 	 * fragmentation.
1853 	 */
1854 
1855 	if (xtfs->cfg.dont_frag)
1856 		pmtu = iptfs_get_cur_pmtu(x, xtfs, skb);
1857 
1858 	/* Break apart GSO skbs. If the queue is nearing full then we want the
1859 	 * accounting and queuing to be based on the individual packets not on the
1860 	 * aggregate GSO buffer.
1861 	 */
1862 	was_gso = skb_is_gso(skb);
1863 	if (!was_gso) {
1864 		segs = skb;
1865 	} else {
1866 		segs = skb_gso_segment(skb, 0);
1867 		if (IS_ERR_OR_NULL(segs)) {
1868 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
1869 			kfree_skb(skb);
1870 			if (IS_ERR(segs))
1871 				return PTR_ERR(segs);
1872 			return -EINVAL;
1873 		}
1874 		consume_skb(skb);
1875 		skb = NULL;
1876 	}
1877 
1878 	/* We can be running on multiple cores and from the network softirq or
1879 	 * from user context depending on where the packet is coming from.
1880 	 */
1881 	spin_lock_bh(&x->lock);
1882 
1883 	skb_list_walk_safe(segs, skb, nskb) {
1884 		skb_mark_not_on_list(skb);
1885 
1886 		/* Once we drop due to no queue space we continue to drop the
		 * rest of the packets from that GSO aggregate.
1888 		 */
1889 		if (!ok) {
1890 nospace:
1891 			trace_iptfs_no_queue_space(skb, xtfs, pmtu, was_gso);
1892 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOQSPACE);
1893 			kfree_skb_reason(skb, SKB_DROP_REASON_FULL_RING);
1894 			continue;
1895 		}
1896 
		/* If the user indicated no iptfs fragmenting, check before
1898 		 * enqueue.
1899 		 */
1900 		if (xtfs->cfg.dont_frag && iptfs_is_too_big(sk, skb, pmtu)) {
1901 			trace_iptfs_too_big(skb, xtfs, pmtu, was_gso);
1902 			kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
1903 			continue;
1904 		}
1905 
1906 		/* Enqueue to send in tunnel */
1907 		ok = iptfs_enqueue(xtfs, skb);
1908 		if (!ok)
1909 			goto nospace;
1910 
1911 		trace_iptfs_enqueue(skb, xtfs, pmtu, was_gso);
1912 	}
1913 
1914 	/* Start a delay timer if we don't have one yet */
1915 	if (!hrtimer_is_queued(&xtfs->iptfs_timer)) {
1916 		hrtimer_start(&xtfs->iptfs_timer, xtfs->init_delay_ns, IPTFS_HRTIMER_MODE);
1917 		xtfs->iptfs_settime = ktime_get_raw_fast_ns();
1918 		trace_iptfs_timer_start(xtfs, xtfs->init_delay_ns);
1919 	}
1920 
1921 	spin_unlock_bh(&x->lock);
1922 	return 0;
1923 }
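
/* Output path overview, as wired up in this file: dst_output() invokes
 * iptfs_output_collect(), which segments any GSO skb and enqueues the inner
 * packets; the iptfs_timer hrtimer then fires iptfs_delay_timer(), which
 * splices the queue off under the lock and hands it to iptfs_output_queued()
 * to aggregate/fragment the inner packets into outer IPTFS packets sent via
 * xfrm_output().
 */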
1924 
1925 /* -------------------------- */
1926 /* Dequeue and send functions */
1927 /* -------------------------- */
1928 
1929 static void iptfs_output_prepare_skb(struct sk_buff *skb, u32 blkoff)
1930 {
1931 	struct ip_iptfs_hdr *h;
1932 	size_t hsz = sizeof(*h);
1933 
	/* Push on and initialize the IPTFS basic header */
1935 	h = skb_push(skb, hsz);
1936 	memset(h, 0, hsz);
1937 	if (blkoff)
1938 		h->block_offset = htons(blkoff);
1939 
	/* network_header currently points at the inner IP packet;
	 * move it to the IPTFS header
1942 	 */
1943 	skb->transport_header = skb->network_header;
1944 	skb->network_header -= hsz;
1945 
1946 	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
1947 }
1948 
1949 /**
1950  * iptfs_copy_create_frag() - create an inner fragment skb.
1951  * @st: The source packet data.
1952  * @offset: offset in @st of the new fragment data.
1953  * @copy_len: the amount of data to copy from @st.
1954  *
1955  * Create a new skb holding a single IPTFS inner packet fragment. @copy_len must
1956  * not be greater than the max fragment size.
1957  *
1958  * Return: the new fragment skb or an ERR_PTR().
1959  */
1960 static struct sk_buff *iptfs_copy_create_frag(struct skb_seq_state *st, u32 offset, u32 copy_len)
1961 {
1962 	struct sk_buff *src = st->root_skb;
1963 	struct sk_buff *skb;
1964 	int err;
1965 
1966 	skb = iptfs_alloc_skb(src, copy_len, true);
1967 	if (!skb)
1968 		return ERR_PTR(-ENOMEM);
1969 
1970 	/* Now copy `copy_len` data from src */
1971 	err = skb_copy_seq_read(st, offset, skb_put(skb, copy_len), copy_len);
1972 	if (err) {
1973 		kfree_skb(skb);
1974 		return ERR_PTR(err);
1975 	}
1976 
1977 	return skb;
1978 }
1979 
1980 /**
1981  * iptfs_copy_create_frags() - create and send N-1 fragments of a larger skb.
1982  * @skbp: the source packet skb (IN), skb holding the last fragment in
1983  *        the fragment stream (OUT).
1984  * @xtfs: IPTFS SA state.
1985  * @mtu: the max IPTFS fragment size.
1986  *
1987  * This function is responsible for fragmenting a larger inner packet into a
1988  * sequence of IPTFS payload packets. The last fragment is returned rather than
1989  * being sent so that the caller can append more inner packets (aggregation) if
1990  * there is room.
1991  *
1992  * Return: 0 on success or a negative error code on failure
1993  */
1994 static int iptfs_copy_create_frags(struct sk_buff **skbp, struct xfrm_iptfs_data *xtfs, u32 mtu)
1995 {
1996 	struct skb_seq_state skbseq;
1997 	struct list_head sublist;
1998 	struct sk_buff *skb = *skbp;
1999 	struct sk_buff *nskb = *skbp;
2000 	u32 copy_len, offset;
	u32 to_copy;
2002 	u32 blkoff = 0;
2003 	int err = 0;
2004 
2005 	INIT_LIST_HEAD(&sublist);
2006 
2007 	skb_prepare_seq_read(skb, 0, skb->len, &skbseq);
2008 
2009 	/* A trimmed `skb` will be sent as the first fragment, later. */
2010 	offset = mtu;
2011 	to_copy = skb->len - offset;
2012 	while (to_copy) {
2013 		/* Send all but last fragment to allow agg. append */
2014 		trace_iptfs_first_fragmenting(nskb, mtu, to_copy, NULL);
2015 		list_add_tail(&nskb->list, &sublist);
2016 
2017 		/* FUTURE: if the packet has an odd/non-aligning length we could
2018 		 * send less data in the penultimate fragment so that the last
2019 		 * fragment then ends on an aligned boundary.
2020 		 */
2021 		copy_len = min(to_copy, mtu);
2022 		nskb = iptfs_copy_create_frag(&skbseq, offset, copy_len);
2023 		if (IS_ERR(nskb)) {
2024 			XFRM_INC_STATS(xs_net(xtfs->x), LINUX_MIB_XFRMOUTERROR);
2025 			skb_abort_seq_read(&skbseq);
2026 			err = PTR_ERR(nskb);
2027 			nskb = NULL;
2028 			break;
2029 		}
2030 		iptfs_output_prepare_skb(nskb, to_copy);
2031 		offset += copy_len;
2032 		to_copy -= copy_len;
2033 		blkoff = to_copy;
2034 	}
2035 	skb_abort_seq_read(&skbseq);
2036 
2037 	/* return last fragment that will be unsent (or NULL) */
2038 	*skbp = nskb;
2039 	if (nskb)
2040 		trace_iptfs_first_final_fragment(nskb, mtu, blkoff, NULL);
2041 
2042 	/* trim the original skb to MTU */
2043 	if (!err)
2044 		err = pskb_trim(skb, mtu);
2045 
2046 	if (err) {
2047 		/* Free all frags. Don't bother sending a partial packet we will
2048 		 * never complete.
2049 		 */
2050 		kfree_skb(nskb);
2051 		list_for_each_entry_safe(skb, nskb, &sublist, list) {
2052 			skb_list_del_init(skb);
2053 			kfree_skb(skb);
2054 		}
2055 		return err;
2056 	}
2057 
2058 	/* prepare the initial fragment with an iptfs header */
2059 	iptfs_output_prepare_skb(skb, 0);
2060 
2061 	/* Send all but last fragment, if we fail to send a fragment then free
2062 	 * the rest -- no point in sending a packet that can't be reassembled.
2063 	 */
2064 	list_for_each_entry_safe(skb, nskb, &sublist, list) {
2065 		skb_list_del_init(skb);
2066 		if (!err)
2067 			err = xfrm_output(NULL, skb);
2068 		else
2069 			kfree_skb(skb);
2070 	}
2071 	if (err)
2072 		kfree_skb(*skbp);
2073 	return err;
2074 }
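
/* A sketch of the fragmentation arithmetic above, with illustrative numbers:
 * given mtu == 1000 and an inner packet of skb->len == 2500 we produce:
 *
 *    payload 1: the original skb trimmed to inner bytes 0..999, block_offset 0
 *    payload 2: inner bytes 1000..1999, block_offset 1500 (pointing past this
 *               payload, i.e., it is entirely continuation data)
 *    payload 3: inner bytes 2000..2499, block_offset 500, returned unsent in
 *               *skbp so the caller may append further inner packets
 */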
2075 
2076 /**
2077  * iptfs_first_skb() - handle the first dequeued inner packet for output
2078  * @skbp: the source packet skb (IN), skb holding the last fragment in
2079  *        the fragment stream (OUT).
2080  * @xtfs: IPTFS SA state.
2081  * @mtu: the max IPTFS fragment size.
2082  *
2083  * This function is responsible for fragmenting a larger inner packet into a
2084  * sequence of IPTFS payload packets.
2085  *
2086  * The last fragment is returned rather than being sent so that the caller can
2087  * append more inner packets (aggregation) if there is room.
2088  *
2089  * Return: 0 on success or a negative error code on failure
2090  */
2091 static int iptfs_first_skb(struct sk_buff **skbp, struct xfrm_iptfs_data *xtfs, u32 mtu)
2092 {
2093 	struct sk_buff *skb = *skbp;
2094 	int err;
2095 
2096 	/* Classic ESP skips the don't fragment ICMP error if DF is clear on
2097 	 * the inner packet or ignore_df is set. Otherwise it will send an ICMP
	 * or local error if the inner packet won't fit its MTU.
2099 	 *
2100 	 * With IPTFS we do not care about the inner packet DF bit. If the
2101 	 * tunnel is configured to "don't fragment" we error back if things
2102 	 * don't fit in our max packet size. Otherwise we iptfs-fragment as
2103 	 * normal.
2104 	 */
2105 
2106 	/* The opportunity for HW offload has ended */
2107 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2108 		err = skb_checksum_help(skb);
2109 		if (err)
2110 			return err;
2111 	}
2112 
	/* We've split GSO up before queueing */
2114 
2115 	trace_iptfs_first_dequeue(skb, mtu, 0, ip_hdr(skb));
2116 
2117 	/* Consider the buffer Tx'd and no longer owned */
2118 	skb_orphan(skb);
2119 
2120 	/* Simple case -- it fits. `mtu` accounted for all the overhead
2121 	 * including the basic IPTFS header.
2122 	 */
2123 	if (skb->len <= mtu) {
2124 		iptfs_output_prepare_skb(skb, 0);
2125 		return 0;
2126 	}
2127 
2128 	return iptfs_copy_create_frags(skbp, xtfs, mtu);
2129 }
2130 
2131 static struct sk_buff **iptfs_rehome_fraglist(struct sk_buff **nextp, struct sk_buff *child)
2132 {
2133 	u32 fllen = 0;
2134 
	/* It might be possible to account for a frag list in addition to page
	 * fragments, if that's a valid state to be in. The page fragments' size
	 * should be kept in data_len so that only the frag_list size is removed;
	 * this must be done above as well.
2139 	 */
2140 	*nextp = skb_shinfo(child)->frag_list;
2141 	while (*nextp) {
2142 		fllen += (*nextp)->len;
2143 		nextp = &(*nextp)->next;
2144 	}
2145 	skb_frag_list_init(child);
2146 	child->len -= fllen;
2147 	child->data_len -= fllen;
2148 
2149 	return nextp;
2150 }
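
/* An illustration with a hypothetical layout: if the parent's frag_list is
 * [..., A] and A itself carries frag_list [B, C], the caller's walk splices
 * B and C in directly after A, yielding [..., A, B, C], while A's len and
 * data_len shrink by len(B) + len(C) since those bytes now ride directly on
 * the parent's list.
 */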
2151 
2152 static void iptfs_consume_frags(struct sk_buff *to, struct sk_buff *from)
2153 {
2154 	struct skb_shared_info *fromi = skb_shinfo(from);
2155 	struct skb_shared_info *toi = skb_shinfo(to);
2156 	unsigned int new_truesize;
2157 
	/* If the head holds no data only account for the skb metadata,
	 * otherwise grab the head data as an additional page fragment
	 */
2159 	if (!skb_headlen(from)) {
2160 		new_truesize = SKB_TRUESIZE(skb_end_offset(from));
2161 	} else {
2162 		iptfs_skb_head_to_frag(from, &toi->frags[toi->nr_frags]);
2163 		skb_frag_ref(to, toi->nr_frags++);
2164 		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
2165 	}
2166 
2167 	/* Move any other page fragments rather than copy */
2168 	memcpy(&toi->frags[toi->nr_frags], fromi->frags,
2169 	       sizeof(fromi->frags[0]) * fromi->nr_frags);
2170 	toi->nr_frags += fromi->nr_frags;
2171 	fromi->nr_frags = 0;
2172 	from->data_len = 0;
2173 	from->len = 0;
2174 	to->truesize += from->truesize - new_truesize;
2175 	from->truesize = new_truesize;
2176 
2177 	/* We are done with this SKB */
2178 	consume_skb(from);
2179 }
2180 
2181 static void iptfs_output_queued(struct xfrm_state *x, struct sk_buff_head *list)
2182 {
2183 	struct xfrm_iptfs_data *xtfs = x->mode_data;
2184 	struct sk_buff *skb, *skb2, **nextp;
2185 	struct skb_shared_info *shi, *shi2;
2186 
2187 	/* If we are fragmenting due to a large inner packet we will output all
2188 	 * the outer IPTFS packets required to contain the fragments of the
2189 	 * single large inner packet. These outer packets need to be sent
2190 	 * consecutively (ESP seq-wise). Since this output function is always
2191 	 * running from a timer we do not need a lock to provide this guarantee.
2192 	 * We will output our packets consecutively before the timer is allowed
2193 	 * to run again on some other CPU.
2194 	 */
2195 
2196 	while ((skb = __skb_dequeue(list))) {
2197 		u32 mtu = iptfs_get_cur_pmtu(x, xtfs, skb);
2198 		bool share_ok = true;
2199 		int remaining;
2200 
2201 		/* protocol comes to us cleared sometimes */
2202 		skb->protocol = x->outer_mode.family == AF_INET ? htons(ETH_P_IP) :
2203 								  htons(ETH_P_IPV6);
2204 
2205 		if (skb->len > mtu && xtfs->cfg.dont_frag) {
			/* We handle this case before enqueueing, so we only
			 * get here if the MTU changed after we enqueued and
			 * before we dequeued; just drop these.
2209 			 */
2210 			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTERROR);
2211 
2212 			trace_iptfs_first_toobig(skb, mtu, 0, ip_hdr(skb));
2213 			kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
2214 			continue;
2215 		}
2216 
2217 		/* Convert first inner packet into an outer IPTFS packet,
2218 		 * dealing with any fragmentation into multiple outer packets
2219 		 * if necessary.
2220 		 */
2221 		if (iptfs_first_skb(&skb, xtfs, mtu))
2222 			continue;
2223 
2224 		/* If fragmentation was required the returned skb is the last
		 * IPTFS fragment in the chain, and its IPTFS header blkoff has
2226 		 * been set just past the end of the fragment data.
2227 		 *
2228 		 * In either case the space remaining to send more inner packet
2229 		 * data is `mtu` - (skb->len - sizeof iptfs header). This is b/c
2230 		 * the `mtu` value has the basic IPTFS header len accounted for,
2231 		 * and we added that header to the skb so it is a part of
2232 		 * skb->len, thus we subtract it from the skb length.
2233 		 */
2234 		remaining = mtu - (skb->len - sizeof(struct ip_iptfs_hdr));
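		/* E.g., with illustrative numbers: for mtu == 1400 and a
		 * prepared skb of 504 octets (assuming the 4-octet basic
		 * IPTFS header plus a 500-octet inner packet),
		 * remaining == 1400 - (504 - 4) == 900.
		 */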
2235 
2236 		/* Re-home (un-nest) nested fragment lists. We need to do this
2237 		 * b/c we will simply be appending any following aggregated
2238 		 * inner packets using the frag list.
2239 		 */
2240 		shi = skb_shinfo(skb);
2241 		nextp = &shi->frag_list;
2242 		while (*nextp) {
2243 			if (skb_has_frag_list(*nextp))
2244 				nextp = iptfs_rehome_fraglist(&(*nextp)->next, *nextp);
2245 			else
2246 				nextp = &(*nextp)->next;
2247 		}
2248 
2249 		if (shi->frag_list || skb_cloned(skb) || skb_shared(skb))
2250 			share_ok = false;
2251 
2252 		/* See if we have enough space to simply append.
2253 		 *
		 * NOTE: Maybe do not append if we will be mis-aligned;
2255 		 * SW-based endpoints will probably have to copy in this
2256 		 * case.
2257 		 */
2258 		while ((skb2 = skb_peek(list))) {
2259 			trace_iptfs_ingress_nth_peek(skb2, remaining);
2260 			if (skb2->len > remaining)
2261 				break;
2262 
2263 			__skb_unlink(skb2, list);
2264 
			/* Consider the appended buffer Tx'd and no longer owned */
			skb_orphan(skb2);
2267 
2268 			/* If we don't have a cksum in the packet we need to add
2269 			 * one before encapsulation.
2270 			 */
2271 			if (skb2->ip_summed == CHECKSUM_PARTIAL) {
2272 				if (skb_checksum_help(skb2)) {
2273 					XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTERROR);
2274 					kfree_skb(skb2);
2275 					continue;
2276 				}
2277 			}
2278 
			/* skb->pp_recycle is passed to __skb_frag_unref for all
			 * frag pages so we can only share pages with skbs that
			 * match ourselves.
2282 			 */
2283 			shi2 = skb_shinfo(skb2);
2284 			if (share_ok &&
2285 			    (shi2->frag_list ||
			     (!skb2->head_frag && skb_headlen(skb2)) ||
2287 			     skb->pp_recycle != skb2->pp_recycle ||
2288 			     skb_zcopy(skb2) ||
2289 			     (shi->nr_frags + shi2->nr_frags + 1 > MAX_SKB_FRAGS)))
2290 				share_ok = false;
2291 
2292 			/* Do accounting */
2293 			skb->data_len += skb2->len;
2294 			skb->len += skb2->len;
2295 			remaining -= skb2->len;
2296 
2297 			trace_iptfs_ingress_nth_add(skb2, share_ok);
2298 
2299 			if (share_ok) {
2300 				iptfs_consume_frags(skb, skb2);
2301 			} else {
2302 				/* Append to the frag_list */
2303 				*nextp = skb2;
2304 				nextp = &skb2->next;
2305 				if (skb_has_frag_list(skb2))
2306 					nextp = iptfs_rehome_fraglist(nextp,
2307 								      skb2);
2308 				skb->truesize += skb2->truesize;
2309 			}
2310 		}
2311 
2312 		xfrm_output(NULL, skb);
2313 	}
2314 }
2315 
2316 static enum hrtimer_restart iptfs_delay_timer(struct hrtimer *me)
2317 {
2318 	struct sk_buff_head list;
2319 	struct xfrm_iptfs_data *xtfs;
2320 	struct xfrm_state *x;
2321 	time64_t settime;
2322 
2323 	xtfs = container_of(me, typeof(*xtfs), iptfs_timer);
2324 	x = xtfs->x;
2325 
2326 	/* Process all the queued packets
2327 	 *
2328 	 * softirq execution order: timer > tasklet > hrtimer
2329 	 *
2330 	 * Network rx will have run before us giving one last chance to queue
2331 	 * ingress packets for us to process and transmit.
2332 	 */
2333 
2334 	spin_lock(&x->lock);
2335 	__skb_queue_head_init(&list);
2336 	skb_queue_splice_init(&xtfs->queue, &list);
2337 	xtfs->queue_size = 0;
2338 	settime = xtfs->iptfs_settime;
2339 	spin_unlock(&x->lock);
2340 
2341 	/* After the above unlock, packets can begin queuing again, and the
2342 	 * timer can be set again, from another CPU either in softirq or user
2343 	 * context (not from this one since we are running at softirq level
2344 	 * already).
2345 	 */
2346 
2347 	trace_iptfs_timer_expire(xtfs, (unsigned long long)(ktime_get_raw_fast_ns() - settime));
2348 
2349 	iptfs_output_queued(x, &list);
2350 
2351 	return HRTIMER_NORESTART;
2352 }
2353 
2354 /**
2355  * iptfs_encap_add_ipv4() - add outer encaps
2356  * @x: xfrm state
2357  * @skb: the packet
2358  *
2359  * This was originally taken from xfrm4_tunnel_encap_add. The reason for the
2360  * copy is that IP-TFS/AGGFRAG can have different functionality for how to set
 * the TOS/DSCP bits. It also sets the protocol to a different value and
 * doesn't touch the inner headers, as they don't point into a normal
 * singleton inner IP packet.
2364  *
2365  * Return: 0 on success or a negative error code on failure
2366  */
2367 static int iptfs_encap_add_ipv4(struct xfrm_state *x, struct sk_buff *skb)
2368 {
2369 	struct dst_entry *dst = skb_dst(skb);
2370 	struct iphdr *top_iph;
2371 
2372 	skb_reset_inner_network_header(skb);
2373 	skb_reset_inner_transport_header(skb);
2374 
2375 	skb_set_network_header(skb, -(x->props.header_len - x->props.enc_hdr_len));
2376 	skb->mac_header = skb->network_header + offsetof(struct iphdr, protocol);
2377 	skb->transport_header = skb->network_header + sizeof(*top_iph);
2378 
2379 	top_iph = ip_hdr(skb);
2380 	top_iph->ihl = 5;
2381 	top_iph->version = 4;
2382 	top_iph->protocol = IPPROTO_AGGFRAG;
2383 
2384 	/* As we have 0, fractional, 1 or N inner packets there's no obviously
2385 	 * correct DSCP mapping to inherit. ECN should be cleared per RFC9347
2386 	 * 3.1.
2387 	 */
2388 	top_iph->tos = 0;
2389 
2390 	top_iph->frag_off = htons(IP_DF);
2391 	top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));
2392 	top_iph->saddr = x->props.saddr.a4;
2393 	top_iph->daddr = x->id.daddr.a4;
2394 	ip_select_ident(dev_net(dst->dev), skb, NULL);
2395 
2396 	return 0;
2397 }
2398 
2399 #if IS_ENABLED(CONFIG_IPV6)
2400 /**
2401  * iptfs_encap_add_ipv6() - add outer encaps
2402  * @x: xfrm state
2403  * @skb: the packet
2404  *
2405  * This was originally taken from xfrm6_tunnel_encap_add. The reason for the
2406  * copy is that IP-TFS/AGGFRAG can have different functionality for how to set
2407  * the flow label and TOS/DSCP bits. It also sets the protocol to a different
 * value and doesn't touch the inner headers, as they don't point into a
 * normal singleton inner IP packet.
2410  *
2411  * Return: 0 on success or a negative error code on failure
2412  */
2413 static int iptfs_encap_add_ipv6(struct xfrm_state *x, struct sk_buff *skb)
2414 {
2415 	struct dst_entry *dst = skb_dst(skb);
2416 	struct ipv6hdr *top_iph;
2417 	int dsfield;
2418 
2419 	skb_reset_inner_network_header(skb);
2420 	skb_reset_inner_transport_header(skb);
2421 
2422 	skb_set_network_header(skb, -x->props.header_len + x->props.enc_hdr_len);
2423 	skb->mac_header = skb->network_header + offsetof(struct ipv6hdr, nexthdr);
2424 	skb->transport_header = skb->network_header + sizeof(*top_iph);
2425 
2426 	top_iph = ipv6_hdr(skb);
2427 	top_iph->version = 6;
2428 	top_iph->priority = 0;
2429 	memset(top_iph->flow_lbl, 0, sizeof(top_iph->flow_lbl));
2430 	top_iph->nexthdr = IPPROTO_AGGFRAG;
2431 
2432 	/* As we have 0, fractional, 1 or N inner packets there's no obviously
2433 	 * correct DSCP mapping to inherit. ECN should be cleared per RFC9347
2434 	 * 3.1.
2435 	 */
2436 	dsfield = 0;
2437 	ipv6_change_dsfield(top_iph, 0, dsfield);
2438 
2439 	top_iph->hop_limit = ip6_dst_hoplimit(xfrm_dst_child(dst));
2440 	top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
2441 	top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
2442 
2443 	return 0;
2444 }
2445 #endif
2446 
2447 /**
 * iptfs_prepare_output() - prepare the skb for output
2449  * @x: xfrm state
2450  * @skb: the packet
2451  *
2452  * Return: Error value, if 0 then skb values should be as follows:
2453  *    - transport_header should point at ESP header
2454  *    - network_header should point at Outer IP header
2455  *    - mac_header should point at protocol/nexthdr of the outer IP
2456  */
2457 static int iptfs_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
2458 {
2459 	if (x->outer_mode.family == AF_INET)
2460 		return iptfs_encap_add_ipv4(x, skb);
2461 	if (x->outer_mode.family == AF_INET6) {
2462 #if IS_ENABLED(CONFIG_IPV6)
2463 		return iptfs_encap_add_ipv6(x, skb);
2464 #else
2465 		return -EAFNOSUPPORT;
2466 #endif
2467 	}
2468 	return -EOPNOTSUPP;
2469 }
2470 
2471 /* ========================== */
2472 /* State Management Functions */
2473 /* ========================== */
2474 
2475 /**
2476  * __iptfs_get_inner_mtu() - return inner MTU with no fragmentation.
2477  * @x: xfrm state.
2478  * @outer_mtu: the outer mtu
2479  *
 * Return: Correct MTU taking into account the encap overhead.
2481  */
2482 static u32 __iptfs_get_inner_mtu(struct xfrm_state *x, int outer_mtu)
2483 {
2484 	struct crypto_aead *aead;
2485 	u32 blksize;
2486 
2487 	aead = x->data;
2488 	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
2489 	return ((outer_mtu - x->props.header_len - crypto_aead_authsize(aead)) &
2490 		~(blksize - 1)) - 2;
2491 }
2492 
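
/* An example of the calculation above with illustrative numbers: for an
 * outer_mtu of 1500, header_len of 54, an AEAD authsize of 16, and a cipher
 * blocksize aligning to 4: (1500 - 54 - 16) & ~3 == 1428, then minus 2 (the
 * ESP trailer's pad-length/next-header octets) gives 1426.
 */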
2493 /**
2494  * iptfs_get_inner_mtu() - return the inner MTU for an IPTFS xfrm.
2495  * @x: xfrm state.
2496  * @outer_mtu: Outer MTU for the encapsulated packet.
2497  *
 * Return: Correct MTU taking into account the encap overhead.
2499  */
2500 static u32 iptfs_get_inner_mtu(struct xfrm_state *x, int outer_mtu)
2501 {
2502 	struct xfrm_iptfs_data *xtfs = x->mode_data;
2503 
	/* If not dont-frag we have no fixed inner MTU */
2505 	if (!xtfs->cfg.dont_frag)
2506 		return x->outer_mode.family == AF_INET ? IP_MAX_MTU : IP6_MAX_MTU;
2507 	return __iptfs_get_inner_mtu(x, outer_mtu);
2508 }
2509 
2510 /**
2511  * iptfs_user_init() - initialize the SA with IPTFS options from netlink.
2512  * @net: the net data
2513  * @x: xfrm state
2514  * @attrs: netlink attributes
2515  * @extack: extack return data
2516  *
2517  * Return: 0 on success or a negative error code on failure
2518  */
2519 static int iptfs_user_init(struct net *net, struct xfrm_state *x,
2520 			   struct nlattr **attrs,
2521 			   struct netlink_ext_ack *extack)
2522 {
2523 	struct xfrm_iptfs_data *xtfs = x->mode_data;
2524 	struct xfrm_iptfs_config *xc;
2525 	u64 q;
2526 
2527 	xc = &xtfs->cfg;
2528 	xc->max_queue_size = IPTFS_DEFAULT_MAX_QUEUE_SIZE;
2529 	xc->reorder_win_size = IPTFS_DEFAULT_REORDER_WINDOW;
2530 	xtfs->drop_time_ns = IPTFS_DEFAULT_DROP_TIME_USECS * NSECS_IN_USEC;
2531 	xtfs->init_delay_ns = IPTFS_DEFAULT_INIT_DELAY_USECS * NSECS_IN_USEC;
2532 
2533 	if (attrs[XFRMA_IPTFS_DONT_FRAG])
2534 		xc->dont_frag = true;
2535 	if (attrs[XFRMA_IPTFS_REORDER_WINDOW])
2536 		xc->reorder_win_size =
2537 			nla_get_u16(attrs[XFRMA_IPTFS_REORDER_WINDOW]);
2538 	/* saved array is for saving 1..N seq nums from wantseq */
2539 	if (xc->reorder_win_size) {
2540 		xtfs->w_saved = kzalloc_objs(*xtfs->w_saved,
2541 					     xc->reorder_win_size);
2542 		if (!xtfs->w_saved) {
2543 			NL_SET_ERR_MSG(extack, "Cannot alloc reorder window");
2544 			return -ENOMEM;
2545 		}
2546 	}
2547 	if (attrs[XFRMA_IPTFS_PKT_SIZE]) {
2548 		xc->pkt_size = nla_get_u32(attrs[XFRMA_IPTFS_PKT_SIZE]);
2549 		if (!xc->pkt_size) {
2550 			xtfs->payload_mtu = 0;
2551 		} else if (xc->pkt_size > x->props.header_len) {
2552 			xtfs->payload_mtu = xc->pkt_size - x->props.header_len;
2553 		} else {
2554 			NL_SET_ERR_MSG(extack,
2555 				       "Packet size must be 0 or greater than IPTFS/ESP header length");
2556 			return -EINVAL;
2557 		}
2558 	}
2559 	if (attrs[XFRMA_IPTFS_MAX_QSIZE])
2560 		xc->max_queue_size = nla_get_u32(attrs[XFRMA_IPTFS_MAX_QSIZE]);
2561 	if (attrs[XFRMA_IPTFS_DROP_TIME])
2562 		xtfs->drop_time_ns =
2563 			(u64)nla_get_u32(attrs[XFRMA_IPTFS_DROP_TIME]) *
2564 			NSECS_IN_USEC;
2565 	if (attrs[XFRMA_IPTFS_INIT_DELAY])
2566 		xtfs->init_delay_ns =
2567 			(u64)nla_get_u32(attrs[XFRMA_IPTFS_INIT_DELAY]) * NSECS_IN_USEC;
2568 
2569 	q = (u64)xc->max_queue_size * 95;
2570 	do_div(q, 100);
2571 	xtfs->ecn_queue_size = (u32)q;
2572 
2573 	return 0;
2574 }
2575 
2576 static unsigned int iptfs_sa_len(const struct xfrm_state *x)
2577 {
2578 	struct xfrm_iptfs_data *xtfs = x->mode_data;
2579 	struct xfrm_iptfs_config *xc = &xtfs->cfg;
2580 	unsigned int l = 0;
2581 
2582 	if (x->dir == XFRM_SA_DIR_IN) {
2583 		l += nla_total_size(sizeof(u32)); /* drop time usec */
2584 		l += nla_total_size(sizeof(xc->reorder_win_size));
2585 	} else {
2586 		if (xc->dont_frag)
2587 			l += nla_total_size(0);	  /* dont-frag flag */
2588 		l += nla_total_size(sizeof(u32)); /* init delay usec */
2589 		l += nla_total_size(sizeof(xc->max_queue_size));
2590 		l += nla_total_size(sizeof(xc->pkt_size));
2591 	}
2592 
2593 	return l;
2594 }
2595 
2596 static int iptfs_copy_to_user(struct xfrm_state *x, struct sk_buff *skb)
2597 {
2598 	struct xfrm_iptfs_data *xtfs = x->mode_data;
2599 	struct xfrm_iptfs_config *xc = &xtfs->cfg;
2600 	int ret = 0;
2601 	u64 q;
2602 
2603 	if (x->dir == XFRM_SA_DIR_IN) {
2604 		q = xtfs->drop_time_ns;
2605 		do_div(q, NSECS_IN_USEC);
2606 		ret = nla_put_u32(skb, XFRMA_IPTFS_DROP_TIME, q);
2607 		if (ret)
2608 			return ret;
2609 
2610 		ret = nla_put_u16(skb, XFRMA_IPTFS_REORDER_WINDOW,
2611 				  xc->reorder_win_size);
2612 	} else {
2613 		if (xc->dont_frag) {
2614 			ret = nla_put_flag(skb, XFRMA_IPTFS_DONT_FRAG);
2615 			if (ret)
2616 				return ret;
2617 		}
2618 
2619 		q = xtfs->init_delay_ns;
2620 		do_div(q, NSECS_IN_USEC);
2621 		ret = nla_put_u32(skb, XFRMA_IPTFS_INIT_DELAY, q);
2622 		if (ret)
2623 			return ret;
2624 
2625 		ret = nla_put_u32(skb, XFRMA_IPTFS_MAX_QSIZE, xc->max_queue_size);
2626 		if (ret)
2627 			return ret;
2628 
2629 		ret = nla_put_u32(skb, XFRMA_IPTFS_PKT_SIZE, xc->pkt_size);
2630 	}
2631 
2632 	return ret;
2633 }
2634 
2635 static void __iptfs_init_state(struct xfrm_state *x,
2636 			       struct xfrm_iptfs_data *xtfs)
2637 {
2638 	__skb_queue_head_init(&xtfs->queue);
2639 	hrtimer_setup(&xtfs->iptfs_timer, iptfs_delay_timer, CLOCK_MONOTONIC, IPTFS_HRTIMER_MODE);
2640 
2641 	spin_lock_init(&xtfs->drop_lock);
2642 	hrtimer_setup(&xtfs->drop_timer, iptfs_drop_timer, CLOCK_MONOTONIC, IPTFS_HRTIMER_MODE);
2643 
2644 	/* Modify type (esp) adjustment values */
2645 
2646 	if (x->props.family == AF_INET)
2647 		x->props.header_len += sizeof(struct iphdr) + sizeof(struct ip_iptfs_hdr);
2648 	else if (x->props.family == AF_INET6)
2649 		x->props.header_len += sizeof(struct ipv6hdr) + sizeof(struct ip_iptfs_hdr);
2650 	x->props.enc_hdr_len = sizeof(struct ip_iptfs_hdr);
2651 
2652 	/* Always keep a module reference when x->mode_data is set */
2653 	__module_get(x->mode_cbs->owner);
2654 
2655 	x->mode_data = xtfs;
2656 	xtfs->x = x;
2657 }
2658 
2659 static int iptfs_clone_state(struct xfrm_state *x, struct xfrm_state *orig)
2660 {
2661 	struct xfrm_iptfs_data *xtfs;
2662 
2663 	xtfs = kmemdup(orig->mode_data, sizeof(*xtfs), GFP_KERNEL);
2664 	if (!xtfs)
2665 		return -ENOMEM;
2666 
2667 	xtfs->ra_newskb = NULL;
2668 	if (xtfs->cfg.reorder_win_size) {
2669 		xtfs->w_saved = kzalloc_objs(*xtfs->w_saved,
2670 					     xtfs->cfg.reorder_win_size);
2671 		if (!xtfs->w_saved) {
2672 			kfree_sensitive(xtfs);
2673 			return -ENOMEM;
2674 		}
2675 	}
2676 
2677 	x->mode_data = xtfs;
2678 	xtfs->x = x;
2679 
2680 	return 0;
2681 }
2682 
2683 static int iptfs_init_state(struct xfrm_state *x)
2684 {
2685 	struct xfrm_iptfs_data *xtfs;
2686 
2687 	if (x->mode_data) {
2688 		/* We have arrived here from xfrm_state_clone() */
2689 		xtfs = x->mode_data;
2690 	} else {
2691 		xtfs = kzalloc_obj(*xtfs);
2692 		if (!xtfs)
2693 			return -ENOMEM;
2694 	}
2695 
2696 	__iptfs_init_state(x, xtfs);
2697 
2698 	return 0;
2699 }
2700 
2701 static void iptfs_destroy_state(struct xfrm_state *x)
2702 {
2703 	struct xfrm_iptfs_data *xtfs = x->mode_data;
2704 	struct sk_buff_head list;
2705 	struct skb_wseq *s, *se;
2706 	struct sk_buff *skb;
2707 
2708 	if (!xtfs)
2709 		return;
2710 
2711 	spin_lock_bh(&xtfs->x->lock);
2712 	hrtimer_cancel(&xtfs->iptfs_timer);
2713 	__skb_queue_head_init(&list);
2714 	skb_queue_splice_init(&xtfs->queue, &list);
2715 	spin_unlock_bh(&xtfs->x->lock);
2716 
2717 	while ((skb = __skb_dequeue(&list)))
2718 		kfree_skb(skb);
2719 
2720 	spin_lock_bh(&xtfs->drop_lock);
2721 	hrtimer_cancel(&xtfs->drop_timer);
2722 	spin_unlock_bh(&xtfs->drop_lock);
2723 
2724 	if (xtfs->ra_newskb)
2725 		kfree_skb(xtfs->ra_newskb);
2726 
2727 	for (s = xtfs->w_saved, se = s + xtfs->w_savedlen; s < se; s++) {
2728 		if (s->skb)
2729 			kfree_skb(s->skb);
2730 	}
2731 
2732 	kfree_sensitive(xtfs->w_saved);
2733 	kfree_sensitive(xtfs);
2734 
2735 	module_put(x->mode_cbs->owner);
2736 }
2737 
2738 static const struct xfrm_mode_cbs iptfs_mode_cbs = {
2739 	.owner = THIS_MODULE,
2740 	.init_state = iptfs_init_state,
2741 	.clone_state = iptfs_clone_state,
2742 	.destroy_state = iptfs_destroy_state,
2743 	.user_init = iptfs_user_init,
2744 	.copy_to_user = iptfs_copy_to_user,
2745 	.sa_len = iptfs_sa_len,
2746 	.get_inner_mtu = iptfs_get_inner_mtu,
2747 	.input = iptfs_input,
2748 	.output = iptfs_output_collect,
2749 	.prepare_output = iptfs_prepare_output,
2750 };
2751 
2752 static int __init xfrm_iptfs_init(void)
2753 {
2754 	int err;
2755 
2756 	pr_info("xfrm_iptfs: IPsec IP-TFS tunnel mode module\n");
2757 
2758 	err = xfrm_register_mode_cbs(XFRM_MODE_IPTFS, &iptfs_mode_cbs);
2759 	if (err < 0)
2760 		pr_info("%s: can't register IP-TFS\n", __func__);
2761 
2762 	return err;
2763 }
2764 
2765 static void __exit xfrm_iptfs_fini(void)
2766 {
2767 	xfrm_unregister_mode_cbs(XFRM_MODE_IPTFS);
2768 }
2769 
2770 module_init(xfrm_iptfs_init);
2771 module_exit(xfrm_iptfs_fini);
2772 MODULE_LICENSE("GPL");
2773 MODULE_DESCRIPTION("IP-TFS support for xfrm ipsec tunnels");
2774