xref: /linux/kernel/bpf/core.c (revision 9e9f60108423f18a99c9cc93ef7f23490ecc709b)
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <asm/unaligned.h>
#include <linux/bpf.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}
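
/* Illustrative only, not part of this file: classic BPF can address
 * packet data relative to the network header via SKF_NET_OFF (or the
 * link layer via SKF_LL_OFF). Loading the IPv4 protocol byte, which
 * sits at offset 9 into the IP header, looks like this in struct
 * sock_filter form:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9),
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *	};
 *
 * Such negative-offset loads are resolved by the helper above (or by
 * an equivalent path in the arch JITs).
 */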

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);
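
/* A minimal usage sketch (illustrative, mirroring what callers like
 * bpf_prog_create() in net/core/filter.c do): size the allocation via
 * bpf_prog_size(), which covers the struct bpf_prog header plus the
 * instruction array, then fill in the program:
 *
 *	struct bpf_prog *fp;
 *
 *	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
 *	if (!fp)
 *		return -ENOMEM;
 *	fp->len = fprog->len;
 *	memcpy(fp->insns, fprog->filter, fp->len * sizeof(fp->insns[0]));
 */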

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog *fp;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	if (size <= fp_old->pages * PAGE_SIZE)
		return fp_old;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = size / PAGE_SIZE;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_realloc);
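
/* Illustrative: a caller growing a program in place. On success the
 * old struct (minus its preserved aux) has already been freed; on
 * failure fp_old is left untouched and remains the caller's to free:
 *
 *	new_fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
 *	if (!new_fp) {
 *		__bpf_prog_free(old_fp);	(old_fp intact on failure)
 *		return -ENOMEM;
 *	}
 */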

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}
EXPORT_SYMBOL_GPL(__bpf_prog_free);

#ifdef CONFIG_BPF_JIT
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	unsigned int size, hole, start;

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	hdr = module_alloc(size);
	if (hdr == NULL)
		return NULL;

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = size / PAGE_SIZE;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (prandom_u32() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}
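
/* Illustrative arch-JIT usage (a sketch; jit_fill_hole and the other
 * names here are hypothetical stand-ins, not this file's API beyond
 * bpf_jit_binary_alloc() itself):
 *
 *	static void jit_fill_hole(void *area, unsigned int size)
 *	{
 *		memset(area, 0xcc, size);	(e.g. x86 int3 traps)
 *	}
 *
 *	header = bpf_jit_binary_alloc(proglen, &image, 4, jit_fill_hole);
 *	if (!header)
 *		goto out;
 *	... emit machine code into image ...
 *	prog->bpf_func = (void *) image;
 */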

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	module_free(NULL, hdr);
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
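
/* Illustrative: a loader/verifier fixup pass stores each helper as an
 * offset relative to this base in insn->imm, which JMP_CALL in the
 * interpreter below undoes:
 *
 *	insn->imm = helper_fn - __bpf_call_base;	(at fixup time)
 *	...
 *	BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, ..., BPF_R5);
 */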

/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@ctx: is the data we are operating on
 *	@insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
{
	u64 stack[MAX_BPF_STACK / sizeof(u64)];
	u64 regs[MAX_BPF_REG], tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
	};
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })
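
	/* CONT and CONT_JMP currently expand identically; the separate
	 * CONT_JMP name appears to exist so that taken branches could
	 * be handled differently later without relabeling every jump
	 * handler.
	 */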

	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

	/* Registers used in classic BPF programs need to be reset first. */
	regs[BPF_REG_A] = 0;
	regs[BPF_REG_X] = 0;

select_insn:
	goto *jumptable[insn->code];
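
	/* Dispatch works by GCC's computed goto: the opcode byte indexes
	 * the jumptable above directly, each handler finishes with
	 * CONT/CONT_JMP to come back here, and unknown opcodes land on
	 * default_label at the bottom.
	 */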

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;		\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
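
	/* For reference, ALU(ADD, +) above expands to the four handlers
	 *
	 *	ALU64_ADD_X: DST = DST + SRC; CONT;
	 *	ALU_ADD_X:   DST = (u32) DST + (u32) SRC; CONT;
	 *	ALU64_ADD_K: DST = DST + IMM; CONT;
	 *	ALU_ADD_K:   DST = (u32) DST + (u32) IMM; CONT;
	 */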
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
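	/* Note: BPF_LD | BPF_IMM | BPF_DW is the only insn spanning two
	 * struct bpf_insn slots; the low 32 bits come from insn[0].imm,
	 * the high 32 bits from insn[1].imm, hence the extra insn++.
	 */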
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = DST;
		DST = do_div(tmp, SRC);
		CONT;
	ALU_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		tmp = DST;
		DST = do_div(tmp, IMM);
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		do_div(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		do_div(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only
		 * appear in programs where ctx == skb. All programs
		 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6;
		 * bpf_convert_filter() saves it there, and the
		 * internal BPF verifier checks that BPF_R6 == ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;

	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}

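/* A minimal sketch (illustrative, using the BPF_* insn macros from
 * linux/filter.h): the interpreter above would execute this program
 * and return 42 from BPF_R0 when it reaches JMP_EXIT:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 42),
 *		BPF_EXIT_INSN(),
 *	};
 *
 * A populated bpf_prog is then run through the BPF_PROG_RUN() macro,
 * which invokes fp->bpf_func(ctx, fp->insnsi).
 */
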
void __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
}

/**
 *	bpf_prog_select_runtime - select execution runtime for BPF program
 *	@fp: bpf_prog populated with internal BPF program
 *
 * Try to JIT the internal BPF program; if no JIT is available, fall
 * back to the interpreter. The program will then be executed via the
 * BPF_PROG_RUN() macro.
 */
void bpf_prog_select_runtime(struct bpf_prog *fp)
{
	fp->bpf_func = (void *) __bpf_prog_run;

	/* Probe if internal BPF can be JITed */
	bpf_int_jit_compile(fp);
	/* Lock whole bpf_prog as read-only */
	bpf_prog_lock_ro(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
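
/* Illustrative: an arch JIT overrides the weak bpf_int_jit_compile()
 * above and, on success, repoints the program at its generated image
 * (a sketch, not any particular arch's implementation):
 *
 *	void bpf_int_jit_compile(struct bpf_prog *prog)
 *	{
 *		... JIT prog->insnsi into image ...
 *		prog->bpf_func = (void *) image;
 *		prog->jited = true;
 *	}
 */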

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_jit_free(aux->prog);
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	aux->prog = fp;
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);
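
/* Illustrative: freeing is pushed to a workqueue because the last
 * reference is often dropped from softirq/RCU callback context, where
 * bpf_jit_free()'s undoing of the read-only mapping and release of
 * module memory would not be safe. A release path can therefore just
 * call bpf_prog_free() and let the work item do the rest (sketch;
 * prog_release_rcu is a hypothetical callback name):
 *
 *	static void prog_release_rcu(struct rcu_head *rcu)
 *	{
 *		struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 *
 *		bpf_prog_free(fp->prog);	(deferred via aux->work)
 *		kfree(fp);
 *	}
 */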

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}