/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IPV6_FRAG_H
#define _IPV6_FRAG_H
#include <linux/icmpv6.h>
#include <linux/kernel.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>

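/*
 * Users of the IPv6 reassembly code.  The value becomes part of the
 * fragment queue lookup key, so local delivery and the three conntrack
 * defrag hooks never share queues.  Each __IP6_DEFRAG_CONNTRACK_* entry
 * reserves a USHRT_MAX-wide range after its base value so that a
 * conntrack zone id can be added on top (e.g. roughly
 * IP6_DEFRAG_CONNTRACK_IN + zone_id in the nf_defrag_ipv6 hooks).
 */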
enum ip6_defrag_users {
	IP6_DEFRAG_LOCAL_DELIVER,
	IP6_DEFRAG_CONNTRACK_IN,
	__IP6_DEFRAG_CONNTRACK_IN	= IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP6_DEFRAG_CONNTRACK_OUT,
	__IP6_DEFRAG_CONNTRACK_OUT	= IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};

/*
 *	Equivalent of the IPv4 struct ipq (per-datagram reassembly queue)
 */
struct frag_queue {
	struct inet_frag_queue	q;

	int			iif;		/* ifindex of the incoming device */
	__u16			nhoffset;	/* Next Header offset, rewritten on reassembly */
	u8			ecn;		/* ECN bits accumulated from the fragments */
};

#if IS_ENABLED(CONFIG_IPV6)
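/*
 * Constructor callback for an IPv6 fragment queue: record the lookup key
 * and start with no ECN state.  Both the core reassembly code
 * (net/ipv6/reassembly.c) and the netfilter conntrack reassembler use it
 * as their inet_frags ->constructor.
 */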
static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	const struct frag_v6_compare_key *key = a;

	q->key.v6 = *key;
	fq->ecn = 0;
}

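/*
 * rhashtable callbacks for the fragment hash table: both the standalone
 * key and the key embedded in an inet_frag_queue are hashed over the
 * whole struct frag_v6_compare_key, and lookup compares keys with
 * memcmp().  Users of this header wire them up roughly like the
 * following (a sketch; see ip6_rhash_params in net/ipv6/reassembly.c
 * for the real definition):
 *
 *	const struct rhashtable_params ip6_rhash_params = {
 *		.head_offset		= offsetof(struct inet_frag_queue, node),
 *		.hashfn			= ip6frag_key_hashfn,
 *		.obj_hashfn		= ip6frag_obj_hashfn,
 *		.obj_cmpfn		= ip6frag_obj_cmpfn,
 *		.automatic_shrinking	= true,
 *	};
 */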
static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash2(data,
		      sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
}

static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct inet_frag_queue *fq = data;

	return jhash2((const u32 *)&fq->key.v6,
		      sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
}

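/* rhashtable obj_cmpfn: returns 0 when the queue's key matches arg->key. */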
static inline int
ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct frag_v6_compare_key *key = arg->key;
	const struct inet_frag_queue *fq = ptr;

	return !!memcmp(&fq->key, key, sizeof(*key));
}

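/*
 * Expiry handler shared by the regular reassembly path and the netfilter
 * conntrack reassembler: kill the timed-out queue, bump the reassembly
 * failure/timeout MIB counters and, if the first fragment did arrive,
 * pull it back out of the rbtree and send an ICMPv6 Time Exceeded
 * (fragment reassembly time exceeded) error to the source.
 */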
static inline void
ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
{
	struct net_device *dev = NULL;
	struct sk_buff *head;
	int refs = 1;

	rcu_read_lock();
	spin_lock(&fq->q.lock);

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto out;

	fq->q.flags |= INET_FRAG_DROP;
	inet_frag_kill(&fq->q, &refs);

	/* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */
	if (READ_ONCE(fq->q.fqdir->dead)) {
		inet_frag_queue_flush(&fq->q, 0);
		goto out;
	}

	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out;

	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.flags & INET_FRAG_FIRST_IN))
		goto out;

	/* sk_buff::dev and sk_buff::rbnode are unionized. So we
	 * pull the head out of the tree in order to be able to
	 * deal with head->dev.
	 */
	head = inet_frag_pull_head(&fq->q);
	if (!head)
		goto out;

	head->dev = dev;
	spin_unlock(&fq->q.lock);

	icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
	kfree_skb_reason(head, SKB_DROP_REASON_FRAG_REASM_TIMEOUT);
	goto out_rcu_unlock;

out:
	spin_unlock(&fq->q.lock);
out_rcu_unlock:
	rcu_read_unlock();
	inet_frag_putn(&fq->q, refs);
}

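/*
 * Per RFC 8200, a first fragment that does not include all headers
 * through the upper-layer header should be discarded.  Callers such as
 * ipv6_frag_rcv() and nf_ct_frag6_gather() use the helper below to
 * reject first fragments whose TCP/UDP/ICMPv6 header is cut short,
 * roughly (a sketch, not the callers' exact code):
 *
 *	if (!(fhdr->frag_off & htons(IP6_OFFSET)) &&
 *	    ipv6frag_thdr_truncated(skb, start, &nexthdr))
 *		... drop and report a header error ...
 */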
/* Check if the upper layer header is truncated in the first fragment. */
static inline bool
ipv6frag_thdr_truncated(struct sk_buff *skb, int start, u8 *nexthdrp)
{
	u8 nexthdr = *nexthdrp;
	__be16 frag_off;
	int offset;

	offset = ipv6_skip_exthdr(skb, start, &nexthdr, &frag_off);
	if (offset < 0 || (frag_off & htons(IP6_OFFSET)))
		return false;
	switch (nexthdr) {
	case NEXTHDR_TCP:
		offset += sizeof(struct tcphdr);
		break;
	case NEXTHDR_UDP:
		offset += sizeof(struct udphdr);
		break;
	case NEXTHDR_ICMP:
		offset += sizeof(struct icmp6hdr);
		break;
	default:
		offset += 1;
	}
	if (offset > skb->len)
		return true;
	return false;
}

#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif /* _IPV6_FRAG_H */