// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IPv6 fragment reassembly for connection tracking
 *
 * Copyright (C)2004 USAGI/WIDE Project
 *
 * Author:
 *	Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *
 * Based on: net/ipv6/reassembly.c
 */

#define pr_fmt(fmt) "IPv6-nf: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>

#include <net/ipv6_frag.h>

#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
#include <linux/sysctl.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/netns/generic.h>

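/* All reassembly state is per network namespace: each netns carries its own
 * struct nft_ct_frag6_pernet (looked up via nf_frag_pernet_id below), which
 * holds the fqdir with the queue hash table, memory accounting and sysctls.
 */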
static const char nf_frags_cache_name[] = "nf-frags";

static unsigned int nf_frag_pernet_id __read_mostly;
static struct inet_frags nf_frags;

static struct nft_ct_frag6_pernet *nf_frag_pernet(struct net *net)
{
	return net_generic(net, nf_frag_pernet_id);
}

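/* Three sysctl knobs under net/netfilter control the defrag queues:
 * frag6_timeout bounds how long an incomplete queue may live, while
 * frag6_low_thresh/frag6_high_thresh bound the memory the queues may use.
 * The .data pointers are filled in at registration time, per namespace.
 */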
#ifdef CONFIG_SYSCTL

static struct ctl_table nf_ct_frag6_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_frag6_timeout",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "nf_conntrack_frag6_low_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "nf_conntrack_frag6_high_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
};

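/* Namespaces other than init_net get a private copy of the table so that
 * each can point .data at its own fqdir fields; init_net uses the template
 * directly. extra1/extra2 cross-link the two thresholds so that low_thresh
 * can never be raised above high_thresh and vice versa.
 */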
static int nf_ct_frag6_sysctl_register(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag;
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = nf_ct_frag6_sysctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;
	}

	nf_frag = nf_frag_pernet(net);

	table[0].data	= &nf_frag->fqdir->timeout;
	table[1].data	= &nf_frag->fqdir->low_thresh;
	table[1].extra2	= &nf_frag->fqdir->high_thresh;
	table[2].data	= &nf_frag->fqdir->high_thresh;
	table[2].extra1	= &nf_frag->fqdir->low_thresh;

	hdr = register_net_sysctl_sz(net, "net/netfilter", table,
				     ARRAY_SIZE(nf_ct_frag6_sysctl_table));
	if (hdr == NULL)
		goto err_reg;

	nf_frag->nf_frag_frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
	const struct ctl_table *table;

	table = nf_frag->nf_frag_frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(nf_frag->nf_frag_frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

#else
static int nf_ct_frag6_sysctl_register(struct net *net)
{
	return 0;
}
static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
{
}
#endif

static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
			     struct sk_buff *prev_tail, struct net_device *dev,
			     int *refs);

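/* Condense a fragment's ECN codepoint into one of four bits; the bits of
 * all fragments are OR-ed into fq->ecn and mapped back through
 * ip_frag_ecn_table at reassembly time, where an invalid combination
 * (0xff) aborts reassembly.
 */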
static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}

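/* Timer callback: a queue that has not completed within the configured
 * timeout is torn down via the shared ip6frag_expire_frag_queue() helper.
 */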
static void nf_ct_frag6_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = timer_container_of(frag, t, timer);
	struct frag_queue *fq;

	fq = container_of(frag, struct frag_queue, q);

	ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);
}

/* Creation primitives. */
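/* Look up (or create) the queue for this fragment. Queues are keyed by
 * fragment id, source/destination address, defrag user and, for link-local
 * or multicast destinations only, the incoming interface: globally scoped
 * fragments may legitimately arrive on different interfaces.
 */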
static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
				  const struct ipv6hdr *hdr, int iif)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
	struct frag_v6_compare_key key = {
		.id = id,
		.saddr = hdr->saddr,
		.daddr = hdr->daddr,
		.user = user,
		.iif = iif,
	};
	struct inet_frag_queue *q;

	if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
					     IPV6_ADDR_LINKLOCAL)))
		key.iif = 0;

	q = inet_frag_find(nf_frag->fqdir, &key);
	if (!q)
		return NULL;

	return container_of(q, struct frag_queue, q);
}

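/* Add one fragment to its queue. Returns 0 when this fragment completed
 * the datagram and it was reassembled in place, -EINPROGRESS when the
 * fragment was queued (or was a duplicate and freed), and a negative
 * error when the fragment is invalid and must be dropped by the caller.
 */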
static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
			     const struct frag_hdr *fhdr, int nhoff,
			     int *refs)
{
	unsigned int payload_len;
	struct net_device *dev;
	struct sk_buff *prev;
	int offset, end, err;
	u8 ecn;

	if (fq->q.flags & INET_FRAG_COMPLETE) {
		pr_debug("Already completed\n");
		goto err;
	}

	payload_len = ntohs(ipv6_hdr(skb)->payload_len);

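	/* The upper 13 bits of frag_off hold the offset in 8-byte units,
	 * so masking off the three flag bits yields the byte offset
	 * directly. "end" is one past this fragment's last payload byte:
	 * payload_len minus the headers between the fixed IPv6 header and
	 * the fragment payload.
	 */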
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (payload_len -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		pr_debug("offset is too large.\n");
		return -EINVAL;
	}

	ecn = ip6_frag_ecn(ipv6_hdr(skb));

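	/* The headers up to and including the fragment header are pulled
	 * off below; remove their contribution from the complete checksum
	 * now so skb->csum stays valid for the payload alone.
	 */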
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) {
			pr_debug("already received last fragment\n");
			goto err;
		}
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			pr_debug("end of fragment not rounded to 8 bytes.\n");
			inet_frag_kill(&fq->q, refs);
			return -EPROTO;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN) {
				pr_debug("last packet already reached.\n");
				goto err;
			}
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *)(fhdr + 1) - skb->data)) {
		pr_debug("queue: message is too short.\n");
		goto err;
	}
	if (pskb_trim_rcsum(skb, end - offset)) {
		pr_debug("Can't trim\n");
		goto err;
	}

	/* Note : skb->rbnode and skb->dev share the same location. */
	dev = skb->dev;
	/* Makes sure the compiler won't do silly aliasing games */
	barrier();

	prev = fq->q.fragments_tail;
	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
	if (err) {
		if (err == IPFRAG_DUP) {
			/* No error for duplicates, pretend they got queued. */
			kfree_skb_reason(skb, SKB_DROP_REASON_DUP_FRAG);
			return -EINPROGRESS;
		}
		goto insert_error;
	}

	if (dev)
		fq->iif = dev->ifindex;

	fq->q.stamp = skb->tstamp;
	fq->q.tstamp_type = skb->tstamp_type;
	fq->q.meat += skb->len;
	fq->ecn |= ecn;
	if (payload_len > fq->q.max_size)
		fq->q.max_size = payload_len;
	add_frag_mem_limit(fq->q.fqdir, skb->truesize);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.flags |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = nf_ct_frag6_reasm(fq, skb, prev, dev, refs);
		skb->_skb_refdst = orefdst;

		/* After queue has assumed skb ownership, only 0 or
		 * -EINPROGRESS must be returned.
		 */
		return err ? -EINPROGRESS : 0;
	}

	skb_dst_drop(skb);
	skb_orphan(skb);
	return -EINPROGRESS;

insert_error:
	inet_frag_kill(&fq->q, refs);
err:
	skb_dst_drop(skb);
	return -EINVAL;
}

/*
 * Check if this packet is complete.
 *
 * Called with the fq lock held; the caller must have checked that the
 * queue is eligible for reassembly, i.e. it is not COMPLETE, the first
 * and last fragments have arrived and all the bits are here.
 */
static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
			     struct sk_buff *prev_tail, struct net_device *dev,
			     int *refs)
{
	void *reasm_data;
	int payload_len;
	u8 ecn;

	inet_frag_kill(&fq->q, refs);

	ecn = ip_frag_ecn_table[fq->ecn];
	if (unlikely(ecn == 0xff))
		goto err;

	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
	if (!reasm_data)
		goto err;

	payload_len = -skb_network_offset(skb) -
		      sizeof(struct ipv6hdr) + fq->q.len -
		      sizeof(struct frag_hdr);
	if (payload_len > IPV6_MAXPLEN) {
		net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
				    payload_len);
		goto err;
	}

	/* We have to remove the fragment header from the datagram and
	 * relocate the header in order to calculate the ICV correctly. */
	skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
		(skb->data - skb->head) - sizeof(struct frag_hdr));
	skb->mac_header += sizeof(struct frag_hdr);
	skb->network_header += sizeof(struct frag_hdr);

	skb_reset_transport_header(skb);

	inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);

	skb->ignore_df = 1;
	skb->dev = dev;
	ipv6_hdr(skb)->payload_len = htons(payload_len);
	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
	IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
	IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;

	/* Yes, and fold redundant checksum back. 8) */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_partial(skb_network_header(skb),
					 skb_network_header_len(skb),
					 skb->csum);

	fq->q.rb_fragments = RB_ROOT;
	fq->q.fragments_tail = NULL;
	fq->q.last_run_head = NULL;

	return 0;

err:
	inet_frag_kill(&fq->q, refs);
	return -EINVAL;
}

/*
 * Find the header just before the Fragment Header.
 *
 * On success, returns 0 and sets:
 * (*prevhdrp): the value of the "Next Header" field in the header
 *		just before the Fragment Header.
 * (*prevhoff): the offset of the "Next Header" field in the header
 *		just before the Fragment Header.
 * (*fhoff)  : the offset of the Fragment Header.
 *
 * Based on ipv6_skip_exthdr() in net/ipv6/exthdrs.c
 */
static int
find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
{
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	const int netoff = skb_network_offset(skb);
	u8 prev_nhoff = netoff + offsetof(struct ipv6hdr, nexthdr);
	int start = netoff + sizeof(struct ipv6hdr);
	int len = skb->len - start;
	u8 prevhdr = NEXTHDR_IPV6;

	while (nexthdr != NEXTHDR_FRAGMENT) {
		struct ipv6_opt_hdr hdr;
		int hdrlen;

		if (!ipv6_ext_hdr(nexthdr)) {
			return -1;
		}
		if (nexthdr == NEXTHDR_NONE) {
			pr_debug("next header is none\n");
			return -1;
		}
		if (len < (int)sizeof(struct ipv6_opt_hdr)) {
			pr_debug("too short\n");
			return -1;
		}
		if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
			BUG();
		if (nexthdr == NEXTHDR_AUTH)
			hdrlen = ipv6_authlen(&hdr);
		else
			hdrlen = ipv6_optlen(&hdr);

		prevhdr = nexthdr;
		prev_nhoff = start;

		nexthdr = hdr.nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	if (len < 0)
		return -1;

	*prevhdrp = prevhdr;
	*prevhoff = prev_nhoff;
	*fhoff = start;

	return 0;
}

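/* Entry point: collect an IPv6 fragment for conntrack defragmentation.
 * Returns 0 when the skb needs no further handling here (it is not a
 * fragment, or reassembly completed in place), -EINPROGRESS when the
 * fragment was queued and ownership transferred, or a negative errno
 * on failure.
 */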
int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
{
	u16 savethdr = skb->transport_header;
	u8 nexthdr = NEXTHDR_FRAGMENT;
	int fhoff, nhoff, ret;
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr;
	int refs = 0;
	u8 prevhdr;

	/* Jumbo payload inhibits frag. header */
	if (ipv6_hdr(skb)->payload_len == 0) {
		pr_debug("payload len = 0\n");
		return 0;
	}

	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
		return 0;

	/* Discard the first fragment if it does not include all headers
	 * RFC 8200, Section 4.5
	 */
	if (ipv6frag_thdr_truncated(skb, fhoff, &nexthdr)) {
		pr_debug("Drop incomplete fragment\n");
		return 0;
	}

	if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
		return -ENOMEM;

	skb_set_transport_header(skb, fhoff);
	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	rcu_read_lock();
	fq = fq_find(net, fhdr->identification, user, hdr,
		     skb->dev ? skb->dev->ifindex : 0);
	if (fq == NULL) {
		rcu_read_unlock();
		pr_debug("Can't find and can't create new queue\n");
		return -ENOMEM;
	}

	spin_lock_bh(&fq->q.lock);

	ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff, &refs);
	if (ret == -EPROTO) {
		skb->transport_header = savethdr;
		ret = 0;
	}

	spin_unlock_bh(&fq->q.lock);
	rcu_read_unlock();
	inet_frag_putn(&fq->q, refs);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
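
/* A sketch of the typical caller (the defrag hook in
 * nf_defrag_ipv6_hooks.c does essentially this):
 *
 *	err = nf_ct_frag6_gather(state->net, skb, user);
 *	if (err == -EINPROGRESS)
 *		return NF_STOLEN;	// fragment queued, skb consumed
 *	return err == 0 ? NF_ACCEPT : NF_DROP;
 */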

static int nf_ct_net_init(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);
	int res;

	res = fqdir_init(&nf_frag->fqdir, &nf_frags, net);
	if (res < 0)
		return res;

	nf_frag->fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH;
	nf_frag->fqdir->low_thresh = IPV6_FRAG_LOW_THRESH;
	nf_frag->fqdir->timeout = IPV6_FRAG_TIMEOUT;

	res = nf_ct_frag6_sysctl_register(net);
	if (res < 0)
		fqdir_exit(nf_frag->fqdir);
	return res;
}

static void nf_ct_net_pre_exit(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);

	fqdir_pre_exit(nf_frag->fqdir);
}

static void nf_ct_net_exit(struct net *net)
{
	struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net);

	nf_ct_frags6_sysctl_unregister(net);
	fqdir_exit(nf_frag->fqdir);
}

static struct pernet_operations nf_ct_net_ops = {
	.init		= nf_ct_net_init,
	.pre_exit	= nf_ct_net_pre_exit,
	.exit		= nf_ct_net_exit,
	.id		= &nf_frag_pernet_id,
	.size		= sizeof(struct nft_ct_frag6_pernet),
};

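/* Fragment queues live in an rhashtable inside the per-netns fqdir,
 * keyed by struct frag_v6_compare_key via the shared ip6frag hash and
 * compare helpers; the table shrinks automatically as queues complete
 * or expire.
 */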
static const struct rhashtable_params nfct_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.hashfn			= ip6frag_key_hashfn,
	.obj_hashfn		= ip6frag_obj_hashfn,
	.obj_cmpfn		= ip6frag_obj_cmpfn,
	.automatic_shrinking	= true,
};

int nf_ct_frag6_init(void)
{
	int ret = 0;

	nf_frags.constructor = ip6frag_init;
	nf_frags.destructor = NULL;
	nf_frags.qsize = sizeof(struct frag_queue);
	nf_frags.frag_expire = nf_ct_frag6_expire;
	nf_frags.frags_cache_name = nf_frags_cache_name;
	nf_frags.rhash_params = nfct_rhash_params;
	ret = inet_frags_init(&nf_frags);
	if (ret)
		goto out;
	ret = register_pernet_subsys(&nf_ct_net_ops);
	if (ret)
		inet_frags_fini(&nf_frags);

out:
	return ret;
}

void nf_ct_frag6_cleanup(void)
{
	unregister_pernet_subsys(&nf_ct_net_ops);
	inet_frags_fini(&nf_frags);
}