/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Michael J. Silbersack.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * IP ID generation is a fascinating topic.
 *
 * In order to avoid ID collisions during packet reassembly, common sense
 * dictates that the period between reuse of IDs be as large as possible.
 * This leads to the classic implementation of a system-wide counter, thereby
 * ensuring that IDs repeat only once every 2^16 packets.
 *
 * Subsequent security researchers have pointed out that using a global
 * counter makes ID values predictable.  This predictability allows traffic
 * analysis, idle scanning, and even packet injection in specific cases.
 * These results suggest that IP IDs should be as random as possible.
 *
 * The "searchable queues" algorithm used in this IP ID implementation was
 * proposed by Amit Klein.  It is a compromise between the two viewpoints
 * above, with provable behavior that can be tuned to the user's
 * requirements.
 *
 * The basic concept is that we supplement a standard random number generator
 * with a queue of the last L IDs that we have handed out to ensure that all
 * IDs have a period of at least L.
 *
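 * For example, with the default L of 8192, an ID just handed out cannot
 * be issued again until at least 8192 further IDs have been handed out.
 *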
 * To efficiently implement this idea, we keep two data structures: a
 * circular array of IDs of size L and a bitstring of 65536 bits.
 *
 * To start, we ask the RNG for a new ID.  A quick index into the bitstring
 * is used to determine if this is a recently used value.  The process is
 * repeated until a value is returned that is not in the bitstring.
 *
 * Having found a usable ID, we remove the ID stored at the current position
 * in the queue from the bitstring and replace it with our new ID.  Our new
 * ID is then added to the bitstring and the queue pointer is incremented.
 *
 * The lower limit of 512 was chosen because there doesn't seem to be much
 * point in having a smaller value.  The upper limit of 32768 was chosen for
 * two reasons.  First, every step above 32768 decreases the entropy.  Taken
 * to an extreme, 65533 would offer 1 bit of entropy.  Second, the number of
 * attempts it takes the algorithm to find an unused ID drastically
 * increases, killing performance.  The default value of 8192 was chosen
 * because it provides a good tradeoff between randomness and non-repetition.
 *
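 * To make that cost concrete: each random draw is usable with probability
 * about (65536 - L) / 65536 (ignoring the excluded zero ID), so with
 * L=8192 the algorithm needs roughly 1.14 draws per ID on average, with
 * L=32768 it needs 2, and the count grows without bound as L approaches
 * 65536.
 *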
 * With L=8192, the queue will use 16K of memory.  The bitstring always
 * uses 8K of memory.  No memory is allocated until the use of random IDs is
 * enabled.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/random.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/bitstring.h>

#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>

/*
 * By default we generate IP IDs only for non-atomic datagrams, as
 * suggested by RFC 6864.  We use a per-CPU counter for that, or, if the
 * user wants it, random ID generation can be turned on instead.
 */
VNET_DEFINE_STATIC(int, ip_rfc6864) = 1;
VNET_DEFINE_STATIC(int, ip_do_randomid) = 0;
#define	V_ip_rfc6864		VNET(ip_rfc6864)
#define	V_ip_do_randomid	VNET(ip_do_randomid)

/*
 * Random ID state engine.
 */
static MALLOC_DEFINE(M_IPID, "ipid", "randomized ip id state");
VNET_DEFINE_STATIC(uint16_t *, id_array);
VNET_DEFINE_STATIC(bitstr_t *, id_bits);
VNET_DEFINE_STATIC(int, array_ptr);
VNET_DEFINE_STATIC(int, array_size);
VNET_DEFINE_STATIC(int, random_id_collisions);
VNET_DEFINE_STATIC(int, random_id_total);
VNET_DEFINE_STATIC(struct mtx, ip_id_mtx);
#define	V_id_array	VNET(id_array)
#define	V_id_bits	VNET(id_bits)
#define	V_array_ptr	VNET(array_ptr)
#define	V_array_size	VNET(array_size)
#define	V_random_id_collisions	VNET(random_id_collisions)
#define	V_random_id_total	VNET(random_id_total)
#define	V_ip_id_mtx	VNET(ip_id_mtx)

/*
 * Non-random ID state engine is simply a per-cpu counter.
 */
VNET_DEFINE_STATIC(counter_u64_t, ip_id);
#define	V_ip_id		VNET(ip_id)

static int	sysctl_ip_randomid(SYSCTL_HANDLER_ARGS);
static int	sysctl_ip_id_change(SYSCTL_HANDLER_ARGS);
static void	ip_initid(int);
static uint16_t ip_randomid(void);
static void	ipid_sysinit(void);
static void	ipid_sysuninit(void);

SYSCTL_DECL(_net_inet_ip);
SYSCTL_PROC(_net_inet_ip, OID_AUTO, random_id,
    CTLTYPE_INT | CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(ip_do_randomid), 0, sysctl_ip_randomid, "IU",
    "Assign random ip_id values");
SYSCTL_INT(_net_inet_ip, OID_AUTO, rfc6864, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(ip_rfc6864), 0,
    "Use constant IP ID for atomic datagrams");
SYSCTL_PROC(_net_inet_ip, OID_AUTO, random_id_period,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET | CTLFLAG_MPSAFE,
    &VNET_NAME(array_size), 0, sysctl_ip_id_change, "IU", "IP ID Array size");
SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id_collisions,
    CTLFLAG_RD | CTLFLAG_VNET,
    &VNET_NAME(random_id_collisions), 0, "Count of IP ID collisions");
SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id_total, CTLFLAG_RD | CTLFLAG_VNET,
    &VNET_NAME(random_id_total), 0, "Count of IP IDs created");

static int
sysctl_ip_randomid(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_ip_do_randomid;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (new != 0 && new != 1)
		return (EINVAL);
	if (new == V_ip_do_randomid)
		return (0);
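	/* Enabling: (re)initialize the ID state with the default period. */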
	if (new == 1 && V_ip_do_randomid == 0)
		ip_initid(8192);
	/* We don't free the memory when turning random IDs off, due to a race. */
	V_ip_do_randomid = new;
	return (0);
}

static int
sysctl_ip_id_change(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_array_size;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if (new >= 512 && new <= 32768)
			ip_initid(new);
		else
			error = EINVAL;
	}
	return (error);
}

static void
ip_initid(int new_size)
{
	uint16_t *new_array;
	bitstr_t *new_bits;

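	/*
	 * Allocate the replacement structures before taking the lock:
	 * M_WAITOK allocations may sleep, which is not permitted while
	 * holding a mutex.
	 */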
	new_array = malloc(new_size * sizeof(uint16_t), M_IPID,
	    M_WAITOK | M_ZERO);
	new_bits = malloc(bitstr_size(65536), M_IPID, M_WAITOK | M_ZERO);

	mtx_lock(&V_ip_id_mtx);
	if (V_id_array != NULL) {
		free(V_id_array, M_IPID);
		free(V_id_bits, M_IPID);
	}
	V_id_array = new_array;
	V_id_bits = new_bits;
	V_array_size = new_size;
	V_array_ptr = 0;
	V_random_id_collisions = 0;
	V_random_id_total = 0;
	mtx_unlock(&V_ip_id_mtx);
}

static uint16_t
ip_randomid(void)
{
	uint16_t new_id;

	mtx_lock(&V_ip_id_mtx);
	/*
	 * To avoid a conflict with the zeros that the array is initially
	 * filled with, we never hand out an ID of zero.
	 */
	new_id = 0;
	do {
		if (new_id != 0)
			V_random_id_collisions++;
		arc4rand(&new_id, sizeof(new_id), 0);
	} while (bit_test(V_id_bits, new_id) || new_id == 0);
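	/*
	 * Evict the ID stored in the current queue slot from the
	 * bitstring, record the new ID in both structures, and advance
	 * the circular queue pointer.
	 */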
	bit_clear(V_id_bits, V_id_array[V_array_ptr]);
	bit_set(V_id_bits, new_id);
	V_id_array[V_array_ptr] = new_id;
	V_array_ptr++;
	if (V_array_ptr == V_array_size)
		V_array_ptr = 0;
	V_random_id_total++;
	mtx_unlock(&V_ip_id_mtx);
	return (new_id);
}

void
ip_fillid(struct ip *ip)
{

	/*
	 * Per RFC6864 Section 4
	 *
	 * o  Atomic datagrams: (DF==1) && (MF==0) && (frag_offset==0)
	 * o  Non-atomic datagrams: (DF==0) || (MF==1) || (frag_offset>0)
	 */
	if (V_ip_rfc6864 && (ip->ip_off & htons(IP_DF)) == htons(IP_DF))
		ip->ip_id = 0;
	else if (V_ip_do_randomid)
		ip->ip_id = ip_randomid();
	else {
		counter_u64_add(V_ip_id, 1);
		/*
		 * There are two caveats to this trick, to be kept in mind.
		 * 1) We can migrate between the counter_u64_add() above and
		 *    the line below, and read the counter of another CPU,
		 *    resulting in too-quick ID reuse.  This is tolerable in
		 *    our particular case, since the probability of such an
		 *    event is much lower than reuse of an ID due to
		 *    legitimate counter overflow, which at modern Internet
		 *    speeds happens all the time.
		 * 2) We are relying on the fact that counter(9) is based on
		 *    a UMA_ZONE_PCPU uma(9) zone.  We also take only the
		 *    last sixteen bits of the counter, so we don't care that
		 *    machines with a 32-bit word do not update their
		 *    counters atomically.
		 */
		ip->ip_id = htons((*(uint64_t *)zpcpu_get(V_ip_id)) & 0xffff);
	}
}

static void
ipid_sysinit(void)
{
	int i;

	mtx_init(&V_ip_id_mtx, "ip_id_mtx", NULL, MTX_DEF);
	V_ip_id = counter_u64_alloc(M_WAITOK);

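	/*
	 * Start each per-CPU counter at a random value so that the
	 * non-random ID sequence does not begin at a predictable point.
	 */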
	CPU_FOREACH(i)
		arc4rand(zpcpu_get_cpu(V_ip_id, i), sizeof(uint64_t), 0);
}
VNET_SYSINIT(ip_id, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY, ipid_sysinit, NULL);

static void
ipid_sysuninit(void)
{

	if (V_id_array != NULL) {
		free(V_id_array, M_IPID);
		free(V_id_bits, M_IPID);
	}
	counter_u64_free(V_ip_id);
	mtx_destroy(&V_ip_id_mtx);
}
VNET_SYSUNINIT(ip_id, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, ipid_sysuninit, NULL);
301