/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit64.h"

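/*
 * Fill the JIT image with trap instructions so that any stray branch into
 * bytes we never emitted faults instead of executing leftover data. Note
 * that memset32() takes a count of u32s, hence size/4.
 */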
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	memset32(area, BREAKPOINT_INSTRUCTION, size/4);
}

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
	return (ctx->seen & (1 << (31 - b2p[i])));
}

static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen |= (1 << (31 - b2p[i]));
}

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
}

/*
 * When not setting up our own stack frame, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 8*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 8
 *		[   unused red zone	] 208 bytes protected
 */
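/*
 * Worked example of the offsets returned below (a sketch, assuming
 * BPF_PPC_STACK_SAVE is the 8*8 = 64 bytes shown in the layout above):
 * with no stack frame, bpf_jit_stack_local() is -(64 + 16) = -80(r1) and
 * bpf_jit_stack_tailcallcnt() is -80 + 8 = -72(r1), both inside the
 * ABI-protected red zone below the stack pointer.
 */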
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 16);
}

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 8;
}

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
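	/*
	 * Each saved NVR gets an 8-byte slot just below the top of our
	 * frame (or in the red zone below r1 when we have no frame):
	 * r31 at -8, r30 at -16, and so on -- the "nv gpr save area"
	 * in the diagram above.
	 */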
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT asked about an unknown register\n");
	BUG();
}

static void bpf_jit_emit_skb_loads(u32 *image, struct codegen_context *ctx)
{
	/*
	 * Load skb->len and skb->data_len
	 * r3 points to skb
	 */
	PPC_LWZ(b2p[SKB_HLEN_REG], 3, offsetof(struct sk_buff, len));
	PPC_LWZ(b2p[TMP_REG_1], 3, offsetof(struct sk_buff, data_len));
	/* header_len = len - data_len */
	PPC_SUB(b2p[SKB_HLEN_REG], b2p[SKB_HLEN_REG], b2p[TMP_REG_1]);

	/* skb->data pointer */
	PPC_BPF_LL(b2p[SKB_DATA_REG], 3, offsetof(struct sk_buff, data));
}

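/*
 * A sketch of the frame the prologue below builds when one is needed:
 * r1 drops by BPF_PPC_STACKFRAME + ctx->stack_size; the usual minimum
 * frame sits at the bottom, the BPF program's own stack area
 * (ctx->stack_size bytes, with BPF_REG_FP pointing at its top) sits above
 * it, and the local_tmp_var/tail_call_cnt slots plus the NVR save area
 * fill the space up to the previous stack pointer.
 */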
static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		PPC_LI(b2p[TMP_REG_1], 0);
		/* this goes in the redzone */
		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
	} else {
		PPC_NOP();
		PPC_NOP();
	}

#define BPF_TAILCALL_PROLOGUE_SIZE	8
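/*
 * The two instructions above (li + std, or the two nops) are the 8 bytes
 * this constant refers to: a tail call branches past them so the callee
 * keeps the caller's tail_call_cnt instead of resetting it.
 */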

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
		}

		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/*
	 * Save additional non-volatile regs if we cache skb
	 * Also, setup skb data
	 */
	if (ctx->seen & SEEN_SKB) {
		PPC_BPF_STL(b2p[SKB_HLEN_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
		PPC_BPF_STL(b2p[SKB_DATA_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
		bpf_jit_emit_skb_loads(image, ctx);
	}

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, BPF_REG_FP))
		PPC_ADDI(b2p[BPF_REG_FP], 1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size);
}

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Restore non-volatile registers used for skb cache */
	if (ctx->seen & SEEN_SKB) {
		PPC_BPF_LL(b2p[SKB_HLEN_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
		PPC_BPF_LL(b2p[SKB_DATA_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
	}

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size);
		if (ctx->seen & SEEN_FUNC) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
		}
	}
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	PPC_MR(3, b2p[BPF_REG_0]);

	PPC_BLR();
}

static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
{
#ifdef PPC64_ELF_ABI_v1
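	/*
	 * Under the ELFv1 ABI, a function pointer refers to a function
	 * descriptor: three doublewords holding the entry point, the TOC
	 * pointer and an environment pointer, which is why offsets 0 and
	 * 8 are dereferenced below.
	 */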
	/* func points to the function descriptor */
	PPC_LI64(b2p[TMP_REG_2], func);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
	/* ... and move it to LR */
	PPC_MTLR(b2p[TMP_REG_1]);
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so the caller will save/restore r2)
	 * and since we don't use a TOC ourselves.
	 */
	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
#else
	/* We can clobber r12 */
	PPC_FUNC_ADDR(12, func);
	PPC_MTLR(12);
#endif
	PPC_BLRL();
}

static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already set up parameters in r3, r4 and r5:
	 * r3/BPF_REG_1 - pointer to ctx -- passed as-is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = b2p[BPF_REG_2];
	int b2p_index = b2p[BPF_REG_3];

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
	PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
	PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
	PPC_BCC(COND_GT, out);

	/*
	 * tail_call_cnt++;
	 */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1);
	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));

	/* prog = array->ptrs[index]; */
	PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
	PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	PPC_CMPLDI(b2p[TMP_REG_1], 0);
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
	/* skip past the function descriptor */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE);
#else
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE);
#endif
	PPC_MTCTR(b2p[TMP_REG_1]);

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	PPC_BCTR();
	/* out: */
}

/* Assemble the body code between the prologue & epilogue */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      u32 *addrs)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i;

	/* Start of epilogue code - only valid from the 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		u64 imm64;
		u8 *func;
		u32 true_cond;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, insn[i].dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, insn[i].src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			PPC_ADD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			PPC_SUB(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (BPF_OP(code) == BPF_SUB)
				imm = -imm;
			if (imm) {
				if (imm >= -32768 && imm < 32768)
					PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
				}
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				PPC_MULW(dst_reg, dst_reg, src_reg);
			else
				PPC_MULD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
					PPC_MULW(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				else
					PPC_MULD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
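			/*
			 * No modulo instruction is used here: MOD is
			 * open-coded below as dst - (dst / src) * src,
			 * i.e. a divide, a multiply and a subtract.
			 */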
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULW(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVWU(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULD(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			else if (imm == 1)
				goto bpf_alu32_trunc;

			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULW(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVWU(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVD(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULD(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			PPC_NEG(dst_reg, dst_reg);
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			PPC_AND(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			PPC_OR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			PPC_XOR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			PPC_SLW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src */
			PPC_SLD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			PPC_SLWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				PPC_SLDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			PPC_SRW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			PPC_SRD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			PPC_SRWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				PPC_SRDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			PPC_SRAD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				PPC_SRADI(dst_reg, dst_reg, imm);
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			PPC_MR(dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			break;

bpf_alu32_trunc:
		/* Truncate to 32 bits: rlwinm keeps the low word, clears the top word */
		if (BPF_CLASS(code) == BPF_ALU)
			PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
		break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
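				/*
				 * Worked example (illustrative value): with
				 * dst = 0x1122, the rotate/mask below gives
				 * 0x2200 and the insert adds 0x11, for a
				 * result of 0x2211.
				 */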
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
				/* Rotate 8 bits right & insert into the low byte */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
				/* Move result back to dst_reg */
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- bytes 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
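				/*
				 * Worked example (illustrative value): with
				 * dst = 0x11223344, the rotate gives
				 * 0x22334411 and the two inserts patch in
				 * 0x44 and 0x22, for a result of 0x44332211.
				 */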
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
				/* Rotate 24 bits and insert byte 1 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
				/* Rotate 24 bits and insert byte 3 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 64:
				/*
				 * Way easier and faster(?) to store the value
				 * into the stack and then use ldbrx
				 *
				 * ctx->seen will be reliable in pass2, but
				 * the instructions generated will remain the
				 * same across all passes
				 */
				PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
				PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
				PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 48);
				break;
			case 32:
				/* zero-extend 32 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 32);
				break;
			case 64:
				/* nop */
				break;
			}
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STB(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STH(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STW(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STD(src_reg, dst_reg, off);
			break;

		/*
		 * BPF_STX XADD (atomic_add)
		 */
		/* *(u32 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Get EA into TMP_REG_1 */
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			/* error if EA is not word-aligned */
			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			/* load value from memory into TMP_REG_2 */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			/* add value from src_reg into this */
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			/* store result back */
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
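			/*
			 * Branch-offset arithmetic: (7*4) skips this branch
			 * itself, the four instructions that follow and the
			 * final PPC_BCC (assuming PPC_BCC, per bpf_jit.h,
			 * always expands to two instructions), landing just
			 * past this sequence.
			 */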
			/* otherwise, let's try once more */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* exit if the store was not successful */
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_BCC(COND_NE, exit_addr);
			break;
		/* *(u64 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_DW:
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			/* error if EA is not doubleword-aligned */
			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_BCC(COND_NE, exit_addr);
			break;

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			PPC_LBZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
			PPC_LHZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
			PPC_LWZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
			PPC_LD(dst_reg, src_reg, off);
			break;

		/*
		 * Doubleword load
		 * 16-byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				    (((u64)(u32) insn[i+1].imm) << 32);
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			PPC_LI64(dst_reg, imm64);
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;
			func = (u8 *) __bpf_call_base + imm;

			/* Save skb pointer if we need to re-cache skb data */
			if ((ctx->seen & SEEN_SKB) &&
			    bpf_helper_changes_pkt_data(func))
				PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));

			bpf_jit_emit_func_call(image, ctx, (u64)func);

			/* move return value from r3 to BPF_REG_0 */
			PPC_MR(b2p[BPF_REG_0], 3);

			/* refresh skb cache */
			if ((ctx->seen & SEEN_SKB) &&
			    bpf_helper_changes_pkt_data(func)) {
				/* reload skb pointer to r3 */
				PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
				bpf_jit_emit_skb_loads(image, ctx);
			}
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				PPC_CMPLD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
				PPC_CMPD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768)
					PPC_CMPLDI(dst_reg, imm);
				else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768)
					PPC_CMPDI(dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
						    b2p[TMP_REG_1]);
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Loads from packet header/data
		 * Assume 32-bit input value in imm and X (src_reg)
		 */

		/* Absolute loads */
		case BPF_LD | BPF_W | BPF_ABS:
			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_word);
			goto common_load_abs;
		case BPF_LD | BPF_H | BPF_ABS:
			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_half);
			goto common_load_abs;
		case BPF_LD | BPF_B | BPF_ABS:
			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_byte);
common_load_abs:
			/*
			 * Load from [imm]
			 * Load into r4, which can just be passed on to the
			 * skb load helpers as the second parameter
			 */
			PPC_LI32(4, imm);
			goto common_load;

		/* Indirect loads */
		case BPF_LD | BPF_W | BPF_IND:
			func = (u8 *)sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = (u8 *)sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = (u8 *)sk_load_byte;
common_load_ind:
			/*
			 * Load from [src_reg + imm]
			 * Treat src_reg as a 32-bit value
			 */
			PPC_EXTSW(4, src_reg);
			if (imm) {
				if (imm >= -32768 && imm < 32768)
					PPC_ADDI(4, 4, IMM_L(imm));
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_ADD(4, 4, b2p[TMP_REG_1]);
				}
			}

common_load:
			ctx->seen |= SEEN_SKB;
			ctx->seen |= SEEN_FUNC;
			bpf_jit_emit_func_call(image, ctx, (u64)func);

			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in BPF_REG_0
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *bpf_hdr;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;

	if (!fp->jit_requested)
		return org_fp;

	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;

	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

	flen = fp->len;
	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));

	/* Make sure that the stack is quadword aligned. */
	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
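	/* e.g. a program using 40 bytes of BPF stack gets stack_size = 48 */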

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out;
	}

	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;

	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
			bpf_jit_fill_ill_insns);
	if (!bpf_hdr) {
		fp = org_fp;
		goto out;
	}

	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);

	/* Code generation passes 1-2 */
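	/*
	 * Roughly: pass 1 emits with the addrs[] the scouting pass filled
	 * in, and pass 2 re-emits once branch targets such as exit_addr
	 * have settled to their final values.
	 */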
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/*
		 * Note that we output code_base rather than image, since
		 * the opcodes are at code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

#ifdef PPC64_ELF_ABI_v1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;
	fp->jited_len = alloclen;

	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));

out:
	kfree(addrs);

	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}

/* Overriding bpf_jit_free() as we don't set images read-only. */
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *bpf_hdr = (void *)addr;

	if (fp->jited)
		bpf_jit_binary_free(bpf_hdr);

	bpf_prog_unlock_free(fp);
}