/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2008 Michael J. Silbersack.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * IP ID generation is a fascinating topic.
 *
 * In order to avoid ID collisions during packet reassembly, common sense
 * dictates that the period between reuse of IDs be as large as possible.
 * This leads to the classic implementation of a system-wide counter, thereby
 * ensuring that IDs repeat only once every 2^16 packets.
 *
 * Subsequent security researchers have pointed out that using a global
 * counter makes ID values predictable. This predictability allows traffic
 * analysis, idle scanning, and even packet injection in specific cases.
 * These results suggest that IP IDs should be as random as possible.
 *
 * The "searchable queues" algorithm used in this IP ID implementation was
 * proposed by Amit Klein. It is a compromise between the above two
 * viewpoints that has provable behavior that can be tuned to the user's
 * requirements.
 *
 * The basic concept is that we supplement a standard random number generator
 * with a queue of the last L IDs that we have handed out to ensure that all
 * IDs have a period of at least L.
 *
 * To efficiently implement this idea, we keep two data structures: a
 * circular array of IDs of size L and a bitstring of 65536 bits.
 *
 * To start, we ask the RNG for a new ID. A quick index into the bitstring
 * is used to determine if this is a recently used value. The process is
 * repeated until a value is returned that is not in the bitstring.
 *
 * Having found a usable ID, we remove the ID stored at the current position
 * in the queue from the bitstring and replace it with our new ID. Our new
 * ID is then added to the bitstring and the queue pointer is incremented.
 *
 * The lower limit of 512 was chosen because there doesn't seem to be much
 * point in having a smaller value. The upper limit of 32768 was chosen for
 * two reasons. First, every step above 32768 decreases the entropy. Taken
 * to an extreme, 65533 would offer 1 bit of entropy. Second, the number of
 * attempts it takes the algorithm to find an unused ID drastically
 * increases, killing performance. The default value of 8192 was chosen
 * because it provides a good tradeoff between randomness and non-repetition.
 *
 * With L=8192, the queue will use 16K of memory. The bitstring always
 * uses 8K of memory. No memory is allocated until the use of random IDs is
 * enabled.
 */
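
/*
 * As a rough illustration of the scheme described above, the core of the
 * algorithm can be sketched as the following simplified, userspace-style
 * fragment (hypothetical names; it omits the locking, the zero-ID
 * exclusion, and the collision counting done by the real code below):
 *
 *	uint16_t queue[L];			// last L IDs handed out
 *	bitstr_t bit_decl(recent, 65536);	// membership test for them
 *	int ptr = 0;
 *
 *	uint16_t
 *	next_id(void)
 *	{
 *		uint16_t id;
 *
 *		do {
 *			id = random() & 0xffff;	// candidate from the RNG
 *		} while (bit_test(recent, id));	// retry recently used IDs
 *		bit_clear(recent, queue[ptr]);	// retire the oldest ID
 *		queue[ptr] = id;
 *		bit_set(recent, id);		// remember the new ID
 *		ptr = (ptr + 1) % L;		// advance the circular queue
 *		return (id);
 *	}
 */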

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/random.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/bitstring.h>

#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>

/*
 * By default we generate an IP ID only for non-atomic datagrams, as
 * suggested by RFC6864. A per-CPU counter is used for that; alternatively,
 * the user can turn on random ID generation.
 */
VNET_DEFINE_STATIC(int, ip_rfc6864) = 1;
#define	V_ip_rfc6864	VNET(ip_rfc6864)

VNET_DEFINE(int, ip_random_id) = 0;

/*
 * Random ID state engine.
 */
static MALLOC_DEFINE(M_IPID, "ipid", "randomized ip id state");
VNET_DEFINE_STATIC(uint16_t *, id_array);
VNET_DEFINE_STATIC(bitstr_t *, id_bits);
VNET_DEFINE_STATIC(int, array_ptr);
VNET_DEFINE_STATIC(int, array_size);
VNET_DEFINE_STATIC(int, random_id_collisions);
VNET_DEFINE_STATIC(int, random_id_total);
VNET_DEFINE_STATIC(struct mtx, ip_id_mtx);
#define	V_id_array	VNET(id_array)
#define	V_id_bits	VNET(id_bits)
#define	V_array_ptr	VNET(array_ptr)
#define	V_array_size	VNET(array_size)
#define	V_random_id_collisions	VNET(random_id_collisions)
#define	V_random_id_total	VNET(random_id_total)
#define	V_ip_id_mtx	VNET(ip_id_mtx)

/*
 * Non-random ID state engine is simply a per-cpu counter.
 */
VNET_DEFINE_STATIC(counter_u64_t, ip_id);
#define	V_ip_id		VNET(ip_id)

static int	sysctl_ip_random_id(SYSCTL_HANDLER_ARGS);
static int	sysctl_ip_id_change(SYSCTL_HANDLER_ARGS);
static void	ip_initid(int);
static uint16_t	ip_randomid(void);
static void	ipid_sysinit(void);
static void	ipid_sysuninit(void);

SYSCTL_DECL(_net_inet_ip);
SYSCTL_PROC(_net_inet_ip, OID_AUTO, random_id,
    CTLTYPE_INT | CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(ip_random_id), 0, sysctl_ip_random_id, "IU",
    "Assign random ip_id values");
SYSCTL_INT(_net_inet_ip, OID_AUTO, rfc6864, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(ip_rfc6864), 0,
    "Use constant IP ID for atomic datagrams");
SYSCTL_PROC(_net_inet_ip, OID_AUTO, random_id_period,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET | CTLFLAG_MPSAFE,
    &VNET_NAME(array_size), 0, sysctl_ip_id_change, "IU", "IP ID Array size");
SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id_collisions,
    CTLFLAG_RD | CTLFLAG_VNET,
    &VNET_NAME(random_id_collisions), 0, "Count of IP ID collisions");
SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id_total, CTLFLAG_RD | CTLFLAG_VNET,
    &VNET_NAME(random_id_total), 0, "Count of IP IDs created");
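
/*
 * Example of tuning these knobs from userland (a sketch; the values shown
 * are illustrative, not recommendations):
 *
 *	sysctl net.inet.ip.random_id=1			# enable random IP IDs
 *	sysctl net.inet.ip.random_id_period=8192	# set the reuse window L
 *	sysctl net.inet.ip.rfc6864=0			# always fill in ip_id
 */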

static int
sysctl_ip_random_id(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_ip_random_id;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (new != 0 && new != 1)
		return (EINVAL);
	if (new == V_ip_random_id)
		return (0);
	if (new == 1 && V_ip_random_id == 0)
		ip_initid(8192);
	/* We don't free memory when turning random ID off, due to race. */
	V_ip_random_id = new;
	return (0);
}

static int
sysctl_ip_id_change(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	new = V_array_size;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if (new >= 512 && new <= 32768)
			ip_initid(new);
		else
			error = EINVAL;
	}
	return (error);
}

static void
ip_initid(int new_size)
{
	uint16_t *new_array;
	bitstr_t *new_bits;

	new_array = malloc(new_size * sizeof(uint16_t), M_IPID,
	    M_WAITOK | M_ZERO);
	new_bits = malloc(bitstr_size(65536), M_IPID, M_WAITOK | M_ZERO);

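	/*
	 * Install the new state under the lock, freeing anything left over
	 * from a previous (re)initialization.
	 */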
	mtx_lock(&V_ip_id_mtx);
	if (V_id_array != NULL) {
		free(V_id_array, M_IPID);
		free(V_id_bits, M_IPID);
	}
	V_id_array = new_array;
	V_id_bits = new_bits;
	V_array_size = new_size;
	V_array_ptr = 0;
	V_random_id_collisions = 0;
	V_random_id_total = 0;
	mtx_unlock(&V_ip_id_mtx);
}

static uint16_t
ip_randomid(void)
{
	uint16_t new_id;

	mtx_lock(&V_ip_id_mtx);
	/*
	 * To avoid a conflict with the zeros that the array is initially
	 * filled with, we never hand out an id of zero.
	 */
	new_id = 0;
	do {
		if (new_id != 0)
			V_random_id_collisions++;
		arc4rand(&new_id, sizeof(new_id), 0);
	} while (bit_test(V_id_bits, new_id) || new_id == 0);
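	/*
	 * Retire the ID at the current queue slot, record the new ID in both
	 * the queue and the bitstring, then advance the queue pointer,
	 * wrapping around at the end of the array.
	 */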
	bit_clear(V_id_bits, V_id_array[V_array_ptr]);
	bit_set(V_id_bits, new_id);
	V_id_array[V_array_ptr] = new_id;
	V_array_ptr++;
	if (V_array_ptr == V_array_size)
		V_array_ptr = 0;
	V_random_id_total++;
	mtx_unlock(&V_ip_id_mtx);
	return (new_id);
}

void
ip_fillid(struct ip *ip, bool do_randomid)
{

	/*
	 * Per RFC6864 Section 4
	 *
	 * o  Atomic datagrams: (DF==1) && (MF==0) && (frag_offset==0)
	 * o  Non-atomic datagrams: (DF==0) || (MF==1) || (frag_offset>0)
	 */
	if (V_ip_rfc6864 && (ip->ip_off & htons(IP_DF)) == htons(IP_DF))
		ip->ip_id = 0;
	else if (do_randomid)
		ip->ip_id = ip_randomid();
	else {
		counter_u64_add(V_ip_id, 1);
		/*
		 * Two issues with this trick should be kept in mind.
		 * 1) We can migrate between counter_u64_add() and the next
		 *    line and grab a counter from another CPU, resulting in
		 *    too quick ID reuse. This is tolerable in our particular
		 *    case, since the probability of such an event is much
		 *    lower than reuse of an ID due to legitimate overflow,
		 *    which at modern Internet speeds happens all the time.
		 * 2) We are relying on the fact that counter(9) is based on
		 *    a UMA_ZONE_PCPU uma(9) zone. We also take only the last
		 *    sixteen bits of the counter, so we don't care that
		 *    machines with a 32-bit word do not update their counters
		 *    atomically.
		 */
		ip->ip_id = htons((*(uint64_t *)zpcpu_get(V_ip_id)) & 0xffff);
	}
}

static void
ipid_sysinit(void)
{
	int i;

	mtx_init(&V_ip_id_mtx, "ip_id_mtx", NULL, MTX_DEF);
	V_ip_id = counter_u64_alloc(M_WAITOK);

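	/* Seed each per-CPU counter with a random initial value. */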
	CPU_FOREACH(i)
		arc4rand(zpcpu_get_cpu(V_ip_id, i), sizeof(uint64_t), 0);
}
VNET_SYSINIT(ip_id, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY, ipid_sysinit, NULL);

static void
ipid_sysuninit(void)
{

	if (V_id_array != NULL) {
		free(V_id_array, M_IPID);
		free(V_id_bits, M_IPID);
	}
	counter_u64_free(V_ip_id);
	mtx_destroy(&V_ip_id_mtx);
}
VNET_SYSUNINIT(ip_id, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, ipid_sysuninit, NULL);