// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>

#define MAX_INSNS	512
#define MAX_MATCHES	24

struct bpf_reg_match {
	unsigned int line;
	const char *reg;
	const char *match;
};

struct bpf_align_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result;
	enum bpf_prog_type prog_type;
	/* Matches must be in order of increasing line */
	struct bpf_reg_match matches[MAX_MATCHES];
};

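/* Each match requires that the verifier log line for the given
 * instruction number mention the given register with a state string
 * containing the given substring. A "var_off=(value; mask)" string is
 * the verifier's tnum notation: bits set in "mask" are unknown, bits
 * set in "value" are known to be one, and all remaining bits are known
 * to be zero. For example, var_off=(0x2; 0x7fc) describes a value of
 * the form 4n+2, at most 0x7fe.
 */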
static struct bpf_align_test tests[] = {
	/* Four tests of known constants. These aren't staggeringly
	 * interesting since we track exact values now.
	 */
	{
		.descr = "mov",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_3, 8),
			BPF_MOV64_IMM(BPF_REG_3, 16),
			BPF_MOV64_IMM(BPF_REG_3, 32),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{0, "R1", "ctx()"},
			{0, "R10", "fp0"},
			{0, "R3_w", "2"},
			{1, "R3_w", "4"},
			{2, "R3_w", "8"},
			{3, "R3_w", "16"},
			{4, "R3_w", "32"},
		},
	},
	{
		.descr = "shift",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_4, 32),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{0, "R1", "ctx()"},
			{0, "R10", "fp0"},
			{0, "R3_w", "1"},
			{1, "R3_w", "2"},
			{2, "R3_w", "4"},
			{3, "R3_w", "8"},
			{4, "R3_w", "16"},
			{5, "R3_w", "1"},
			{6, "R4_w", "32"},
			{7, "R4_w", "16"},
			{8, "R4_w", "8"},
			{9, "R4_w", "4"},
			{10, "R4_w", "2"},
		},
	},
	{
		.descr = "addsub",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{0, "R1", "ctx()"},
			{0, "R10", "fp0"},
			{0, "R3_w", "4"},
			{1, "R3_w", "8"},
			{2, "R3_w", "10"},
			{3, "R4_w", "8"},
			{4, "R4_w", "12"},
			{5, "R4_w", "14"},
		},
	},
	{
		.descr = "mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 7),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{0, "R1", "ctx()"},
			{0, "R10", "fp0"},
			{0, "R3_w", "7"},
			{1, "R3_w", "7"},
			{2, "R3_w", "14"},
			{3, "R3_w", "56"},
		},
	},

	/* Tests using unknown values */
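/* Load skb->data into R2 and skb->data_end into R3, making packet
 * pointers available to the bounds checks below.
 */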
#define PREP_PKT_POINTERS \
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
		    offsetof(struct __sk_buff, data)), \
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
		    offsetof(struct __sk_buff, data_end))

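/* Bounds-check the first 8 bytes of packet data, then load a single
 * byte into DST_REG, leaving it an unknown scalar in [0, 255].
 */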
#define LOAD_UNKNOWN(DST_REG) \
	PREP_PKT_POINTERS, \
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \
	BPF_EXIT_INSN(), \
	BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0)

	{
		.descr = "unknown shift",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			LOAD_UNKNOWN(BPF_REG_4),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{6, "R0_w", "pkt(off=8,r=8)"},
			{6, "R3_w", "var_off=(0x0; 0xff)"},
			{7, "R3_w", "var_off=(0x0; 0x1fe)"},
			{8, "R3_w", "var_off=(0x0; 0x3fc)"},
			{9, "R3_w", "var_off=(0x0; 0x7f8)"},
			{10, "R3_w", "var_off=(0x0; 0xff0)"},
			{12, "R3_w", "pkt_end()"},
			{17, "R4_w", "var_off=(0x0; 0xff)"},
			{18, "R4_w", "var_off=(0x0; 0x1fe0)"},
			{19, "R4_w", "var_off=(0x0; 0xff0)"},
			{20, "R4_w", "var_off=(0x0; 0x7f8)"},
			{21, "R4_w", "var_off=(0x0; 0x3fc)"},
			{22, "R4_w", "var_off=(0x0; 0x1fe)"},
		},
	},
	{
		.descr = "unknown mul",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{6, "R3_w", "var_off=(0x0; 0xff)"},
			{7, "R4_w", "var_off=(0x0; 0xff)"},
			{8, "R4_w", "var_off=(0x0; 0xff)"},
			{9, "R4_w", "var_off=(0x0; 0xff)"},
			{10, "R4_w", "var_off=(0x0; 0x1fe)"},
			{11, "R4_w", "var_off=(0x0; 0xff)"},
			{12, "R4_w", "var_off=(0x0; 0x3fc)"},
			{13, "R4_w", "var_off=(0x0; 0xff)"},
			{14, "R4_w", "var_off=(0x0; 0x7f8)"},
			{15, "R4_w", "var_off=(0x0; 0xff0)"},
		},
	},
	{
		.descr = "packet const offset",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),

			BPF_MOV64_IMM(BPF_REG_0, 0),

			/* Skip over ethernet header. */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),

			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{2, "R5_w", "pkt(r=0)"},
			{4, "R5_w", "pkt(off=14,r=0)"},
			{5, "R4_w", "pkt(off=14,r=0)"},
			{9, "R2", "pkt(r=18)"},
			{10, "R5", "pkt(off=14,r=18)"},
			{10, "R4_w", "var_off=(0x0; 0xff)"},
			{13, "R4_w", "var_off=(0x0; 0xffff)"},
			{14, "R4_w", "var_off=(0x0; 0xffff)"},
		},
	},
	{
		.descr = "packet variable offset",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),

			/* First, add a constant to the R5 packet pointer,
			 * then a variable with a known alignment.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Now test in the other direction: first add the
			 * variable offset to R5, then the constant.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Test multiple accumulations of unknown values
			 * into a packet pointer.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{6, "R2_w", "pkt(r=8)"},
			{7, "R6_w", "var_off=(0x0; 0x3fc)"},
			/* Offset is added to packet pointer R5, resulting in
			 * known fixed offset, and variable offset from R6.
			 */
			{11, "R5_w", "pkt(id=1,off=14,"},
			/* At the time the word size load is performed from R5,
			 * its total offset is NET_IP_ALIGN + reg->off (0) +
			 * reg->aux_off (14), which is 16. Then the variable
			 * offset is considered using reg->aux_off_align, which
			 * is 4 and meets the load's requirements.
			 */
			{15, "R4", "var_off=(0x0; 0x3fc)"},
			{15, "R5", "var_off=(0x0; 0x3fc)"},
			/* Variable offset is added to the R5 packet pointer,
			 * resulting in an auxiliary alignment of 4. To keep
			 * the BPF verifier's precision-backtracking logging
			 * from interfering, we also add a no-op R4 = R5
			 * instruction to validate the R5 state, and check
			 * that R4 is what it should be in that case.
			 */
			{18, "R4_w", "var_off=(0x0; 0x3fc)"},
			{18, "R5_w", "var_off=(0x0; 0x3fc)"},
			/* Constant offset is added to R5, resulting in
			 * reg->off of 14.
			 */
			{19, "R5_w", "pkt(id=2,off=14,"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off
			 * (14) which is 16. Then the variable offset is 4-byte
			 * aligned, so the total offset is 4-byte aligned and
			 * meets the load's requirements.
			 */
			{24, "R4", "var_off=(0x0; 0x3fc)"},
			{24, "R5", "var_off=(0x0; 0x3fc)"},
			/* Constant offset is added to R5 packet pointer,
			 * resulting in reg->off value of 14.
			 */
			{26, "R5_w", "pkt(off=14,r=8)"},
			/* Variable offset is added to R5, resulting in a
			 * variable offset of (4n). See comment for insn #18
			 * for R4 = R5 trick.
			 */
			{28, "R4_w", "var_off=(0x0; 0x3fc)"},
			{28, "R5_w", "var_off=(0x0; 0x3fc)"},
			/* Constant is added to R5 again, setting reg->off to 18. */
			{29, "R5_w", "pkt(id=3,off=18,"},
			/* And once more we add a variable; resulting var_off
			 * is still (4n), fixed offset is not changed.
			 * Also, we create a new reg->id.
			 */
			{31, "R4_w", "var_off=(0x0; 0x7fc)"},
			{31, "R5_w", "var_off=(0x0; 0x7fc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
			 * which is 20. Then the variable offset is (4n), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{35, "R4", "var_off=(0x0; 0x7fc)"},
			{35, "R5", "var_off=(0x0; 0x7fc)"},
		},
	},
	{
		.descr = "packet variable offset 2",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			/* Make a (4n) offset from the value we just read */
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{6, "R2_w", "pkt(r=8)"},
			{7, "R6_w", "var_off=(0x0; 0x3fc)"},
			/* Adding 14 makes R6 be (4n+2) */
			{8, "R6_w", "var_off=(0x2; 0x7fc)"},
			/* Packet pointer has (4n+2) offset */
			{11, "R5_w", "var_off=(0x2; 0x7fc)"},
			{12, "R4", "var_off=(0x2; 0x7fc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2. Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{15, "R5", "var_off=(0x2; 0x7fc)"},
			/* Newly read value in R6 was shifted left by 2, so has
			 * known alignment of 4.
			 */
			{17, "R6_w", "var_off=(0x0; 0x3fc)"},
			/* Added (4n) to packet pointer's (4n+2) var_off, giving
			 * another (4n+2).
			 */
			{19, "R5_w", "var_off=(0x2; 0xffc)"},
			{20, "R4", "var_off=(0x2; 0xffc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2. Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{23, "R5", "var_off=(0x2; 0xffc)"},
		},
	},
	{
		.descr = "dubious pointer arithmetic",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_IMM(BPF_REG_0, 0),
			/* (ptr - ptr) << 2 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
			/* We have a (4n) value. Let's make a packet offset
			 * out of it. First add 14, to make it a (4n+2)
			 */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			/* Then make sure it's nonnegative */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to packet pointer */
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.matches = {
			{3, "R5_w", "pkt_end()"},
			/* (ptr - ptr) << 2 == unknown, (4n) */
			{5, "R5_w", "var_off=(0x0; 0xfffffffffffffffc)"},
			/* (4n) + 14 == (4n+2). We blow our bounds, because
			 * the add could overflow.
			 */
			{6, "R5_w", "var_off=(0x2; 0xfffffffffffffffc)"},
			/* Checked s >= 0 */
			{9, "R5", "var_off=(0x2; 0x7ffffffffffffffc)"},
			/* packet pointer + nonnegative (4n+2) */
			{11, "R6_w", "var_off=(0x2; 0x7ffffffffffffffc)"},
			{12, "R4_w", "var_off=(0x2; 0x7ffffffffffffffc)"},
			/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
			 * We checked the bounds, but it might have been able
			 * to overflow if the packet pointer started in the
			 * upper half of the address space.
			 * So we did not get a 'range' on R6, and the access
			 * attempt will fail.
			 */
			{15, "R6_w", "var_off=(0x2; 0x7ffffffffffffffc)"},
		}
	},
	{
		.descr = "variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Create another unknown, (4n)-aligned, and subtract
			 * it from the first one
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7),
			/* Bounds-check the result */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{6, "R2_w", "pkt(r=8)"},
			{8, "R6_w", "var_off=(0x0; 0x3fc)"},
			/* Adding 14 makes R6 be (4n+2) */
			{9, "R6_w", "var_off=(0x2; 0x7fc)"},
			/* New unknown value in R7 is (4n) */
			{10, "R7_w", "var_off=(0x0; 0x3fc)"},
			/* Subtracting it from R6 blows our unsigned bounds */
			{11, "R6", "var_off=(0x2; 0xfffffffffffffffc)"},
			/* Checked s >= 0 */
			{14, "R6", "var_off=(0x2; 0x7fc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2. Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5", "var_off=(0x2; 0x7fc)"},
		},
	},
	{
		.descr = "pointer variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned and bounded
			 * to [14,74]
			 */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Subtract it from the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6),
			/* Create another unknown, (4n)-aligned and >= 74.
			 * That in fact means >= 76, since 74 % 4 == 2
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{6, "R2_w", "pkt(r=8)"},
			{9, "R6_w", "var_off=(0x0; 0x3c)"},
			/* Adding 14 makes R6 be (4n+2) */
			{10, "R6_w", "var_off=(0x2; 0x7c)"},
			/* Subtracting from packet pointer overflows ubounds */
			{13, "R5_w", "var_off=(0xffffffffffffff82; 0x7c)"},
			/* New unknown value in R7 is (4n), >= 76 */
			{14, "R7_w", "var_off=(0x0; 0x7fc)"},
			/* Adding it to packet pointer gives nice bounds again */
			{16, "R5_w", "var_off=(0x2; 0x7fc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2. Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5", "var_off=(0x2; 0x7fc)"},
		},
	},
};

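/* The .insns arrays above are zero-padded to MAX_INSNS; scan backwards
 * for the last non-zero instruction to find the real program length.
 */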
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static char bpf_vlog[32768];

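/* Load the test program with strict alignment checking and a verbose
 * (log_level 2) verifier log, then, if the load result matches the
 * expectation, walk the log and check that each expected register
 * state substring appears on the line for its instruction number.
 */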
static int do_test_single(struct bpf_align_test *test)
{
	struct bpf_insn *prog = test->insns;
	int prog_type = test->prog_type;
	char bpf_vlog_copy[32768];
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.prog_flags = BPF_F_STRICT_ALIGNMENT,
		.log_buf = bpf_vlog,
		.log_size = sizeof(bpf_vlog),
		.log_level = 2,
	);
	const char *line_ptr;
	int cur_line = -1;
	int prog_len, i;
	int fd_prog;
	int ret;

	prog_len = probe_filter_length(prog);
	fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
				prog, prog_len, &opts);
	if (fd_prog < 0 && test->result != REJECT) {
		printf("Failed to load program.\n");
		printf("%s", bpf_vlog);
		ret = 1;
	} else if (fd_prog >= 0 && test->result == REJECT) {
		printf("Program loaded unexpectedly; expected rejection.\n");
		printf("%s", bpf_vlog);
		ret = 1;
		close(fd_prog);
	} else {
		ret = 0;
		/* We make a local copy so that we can strtok() it */
		strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy));
		line_ptr = strtok(bpf_vlog_copy, "\n");
		for (i = 0; i < MAX_MATCHES; i++) {
			struct bpf_reg_match m = test->matches[i];
			const char *p;
			int tmp;

			if (!m.match)
				break;
			while (line_ptr) {
				cur_line = -1;
				sscanf(line_ptr, "%u: ", &cur_line);
				if (cur_line == -1)
					sscanf(line_ptr, "from %u to %u: ", &tmp, &cur_line);
				if (cur_line == m.line)
					break;
				line_ptr = strtok(NULL, "\n");
			}
			if (!line_ptr) {
				printf("Failed to find line %u for match: %s=%s\n",
				       m.line, m.reg, m.match);
				ret = 1;
				printf("%s", bpf_vlog);
				break;
			}
			/* Check the next line as well in case the previous line
			 * did not have a corresponding bpf insn. Example:
			 * func#0 @0
			 * 0: R1=ctx() R10=fp0
			 * 0: (b7) r3 = 2 ; R3_w=2
			 *
			 * Sometimes it's actually two lines below, e.g. when
			 * searching for "6: R3_w=scalar(umax=255,var_off=(0x0; 0xff))":
			 * from 4 to 6: R0_w=pkt(off=8,r=8) R1=ctx() R2_w=pkt(r=8) R3_w=pkt_end() R10=fp0
			 * 6: R0_w=pkt(off=8,r=8) R1=ctx() R2_w=pkt(r=8) R3_w=pkt_end() R10=fp0
			 * 6: (71) r3 = *(u8 *)(r2 +0) ; R2_w=pkt(r=8) R3_w=scalar(umax=255,var_off=(0x0; 0xff))
			 */
			while (!(p = strstr(line_ptr, m.reg)) || !strstr(p, m.match)) {
				cur_line = -1;
				line_ptr = strtok(NULL, "\n");
				sscanf(line_ptr ?: "", "%u: ", &cur_line);
				if (!line_ptr || cur_line != m.line)
					break;
			}
			if (cur_line != m.line || !line_ptr || !(p = strstr(line_ptr, m.reg)) || !strstr(p, m.match)) {
				printf("Failed to find match %u: %s=%s\n", m.line, m.reg, m.match);
				ret = 1;
				printf("%s", bpf_vlog);
				break;
			}
		}
		if (fd_prog >= 0)
			close(fd_prog);
	}
	return ret;
}

void test_align(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_align_test *test = &tests[i];

		if (!test__start_subtest(test->descr))
			continue;

		ASSERT_OK(do_test_single(test), test->descr);
	}
}