/* xref: /linux/tools/lib/bpf/gen_loader.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0) */
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Facebook */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <linux/filter.h>
#include <sys/param.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "skel_internal.h"
#include <asm/byteorder.h>
#include "str_error.h"

#define MAX_USED_MAPS	64
#define MAX_USED_PROGS	32
#define MAX_KFUNC_DESCS 256
#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS)
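
/* Rough usage sketch (an assumption for illustration; in-tree, libbpf's
 * object loading path drives these calls when the gen_loader mode is
 * enabled, and most arguments are elided here):
 *
 *	bpf_gen__init(gen, log_level, nr_progs, nr_maps);
 *	bpf_gen__load_btf(gen, btf_data, btf_size);
 *	bpf_gen__map_create(gen, ...);		// one call per map
 *	bpf_gen__prog_load(gen, ...);		// one call per prog
 *	err = bpf_gen__finish(gen, nr_progs, nr_maps);
 *
 * On success, gen->opts->insns/insns_sz and data/data_sz hold the generated
 * loader program and its data blob, ready to be embedded into a skeleton.
 */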

/* The following structure describes the stack layout of the loader program.
 * In addition R6 contains the pointer to context.
 * R7 contains the result of the last sys_bpf command (typically error or FD).
 * R9 contains the result of the last sys_close command.
 *
 * Naming convention:
 * ctx - bpf program context
 * stack - bpf program stack
 * blob - bpf_attr-s, strings, insns, map data.
 *        All the bytes that loader prog will use for read/write.
 */
struct loader_stack {
	__u32 btf_fd;
	__u32 inner_map_fd;
	__u32 prog_fd[MAX_USED_PROGS];
};

#define stack_off(field) \
	(__s16)(-sizeof(struct loader_stack) + offsetof(struct loader_stack, field))
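
/* Example: with MAX_USED_PROGS == 32, sizeof(struct loader_stack) is 136,
 * so stack_off(btf_fd) == -136, stack_off(inner_map_fd) == -132 and
 * stack_off(prog_fd[0]) == -128, i.e. all loader state lives at small
 * negative offsets from R10 (the BPF frame pointer).
 */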

#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))

static int blob_fd_array_off(struct bpf_gen *gen, int index)
{
	return gen->fd_array + index * sizeof(int);
}
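
/* Example: the first MAX_USED_MAPS slots of the fd_array blob are reserved
 * for map fds and the remaining slots for kfunc module BTF fds (see
 * add_kfunc_btf_fd() below), so map 3 lives at blob_fd_array_off(gen, 3)
 * and the first kfunc BTF fd at blob_fd_array_off(gen, MAX_USED_MAPS).
 */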

static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->insn_cur - gen->insn_start;
	void *insn_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	insn_start = realloc(gen->insn_start, off + size);
	if (!insn_start) {
		gen->error = -ENOMEM;
		free(gen->insn_start);
		gen->insn_start = NULL;
		return -ENOMEM;
	}
	gen->insn_start = insn_start;
	gen->insn_cur = insn_start + off;
	return 0;
}

static int realloc_data_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->data_cur - gen->data_start;
	void *data_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	data_start = realloc(gen->data_start, off + size);
	if (!data_start) {
		gen->error = -ENOMEM;
		free(gen->data_start);
		gen->data_start = NULL;
		return -ENOMEM;
	}
	gen->data_start = data_start;
	gen->data_cur = data_start + off;
	return 0;
}

static void emit(struct bpf_gen *gen, struct bpf_insn insn)
{
	if (realloc_insn_buf(gen, sizeof(insn)))
		return;
	memcpy(gen->insn_cur, &insn, sizeof(insn));
	gen->insn_cur += sizeof(insn);
}

static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2)
{
	emit(gen, insn1);
	emit(gen, insn2);
}
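
/* Note the error-latching pattern above: once gen->error is set,
 * realloc_insn_buf()/realloc_data_buf() keep returning it, so emit() and
 * emit2() silently become no-ops. Callers can therefore emit unconditionally
 * and check gen->error once, e.g. in bpf_gen__finish().
 */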

static int add_data(struct bpf_gen *gen, const void *data, __u32 size);
static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off);
static void emit_signature_match(struct bpf_gen *gen);

void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
{
	size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz;
	int i;

	gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
	gen->log_level = log_level;
	/* save ctx pointer into R6 */
	emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));

	/* bzero stack */
	emit(gen, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
	emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	/* amount of stack actually used, only used to calculate iterations, not stack offset */
	nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]);
	/* jump over cleanup code */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
			      /* size of cleanup code below (including map fd cleanup) */
			      (nr_progs_sz / 4) * 3 + 2 +
			      /* 6 insns for emit_sys_close_blob,
			       * 6 insns for debug_regs in emit_sys_close_blob
			       */
			      nr_maps * (6 + (gen->log_level ? 6 : 0))));
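
	/* Example: with nr_progs == 2, nr_maps == 3 and log_level == 0,
	 * nr_progs_sz is 16, so the jump above skips (16 / 4) * 3 + 2 +
	 * 3 * 6 == 32 insns: 4 iterations of the prog fd loop below
	 * (3 insns each), 3 emit_sys_close_blob() sequences (6 insns each),
	 * plus the trailing MOV and EXIT.
	 */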

	/* remember the label where all error branches will jump to */
	gen->cleanup_label = gen->insn_cur - gen->insn_start;
	/* emit cleanup code: close all temp FDs */
	for (i = 0; i < nr_progs_sz; i += 4) {
		emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
		emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
		emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	}
	for (i = 0; i < nr_maps; i++)
		emit_sys_close_blob(gen, blob_fd_array_off(gen, i));
	/* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
	emit(gen, BPF_EXIT_INSN());
	if (OPTS_GET(gen->opts, gen_hash, false))
		emit_signature_match(gen);
}

static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
{
	__u32 size8 = roundup(size, 8);
	__u64 zero = 0;
	void *prev;

	if (realloc_data_buf(gen, size8))
		return 0;
	prev = gen->data_cur;
	if (data) {
		memcpy(gen->data_cur, data, size);
		memcpy(gen->data_cur + size, &zero, size8 - size);
	} else {
		memset(gen->data_cur, 0, size8);
	}
	gen->data_cur += size8;
	return prev - gen->data_start;
}
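
/* Example: add_data(gen, "GPL", 4) copies the 4 license bytes plus 4 bytes
 * of zero padding (size is rounded up to 8) and returns the blob-relative
 * offset of the string, while add_data(gen, NULL, n) just reserves n zeroed
 * bytes, as bpf_gen__init() does for the fd_array.
 */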

/* Get an index for a map_fd/btf_fd slot in the reserved fd_array, or an
 * offset in the data blob relative to the start of fd_array. The caller
 * decides whether the returned value is usable.
 */
static int add_map_fd(struct bpf_gen *gen)
{
	if (gen->nr_maps == MAX_USED_MAPS) {
		pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
		gen->error = -E2BIG;
		return 0;
	}
	return gen->nr_maps++;
}

static int add_kfunc_btf_fd(struct bpf_gen *gen)
{
	int cur;

	if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
		cur = add_data(gen, NULL, sizeof(int));
		return (cur - gen->fd_array) / sizeof(int);
	}
	return MAX_USED_MAPS + gen->nr_fd_array++;
}

static int insn_bytes_to_bpf_size(__u32 sz)
{
	switch (sz) {
	case 8: return BPF_DW;
	case 4: return BPF_W;
	case 2: return BPF_H;
	case 1: return BPF_B;
	default: return -1;
	}
}

/* *(u64 *)(blob + off) = (u64)(void *)(blob + data) */
static void emit_rel_store(struct bpf_gen *gen, int off, int data)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, data));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
}

static void move_blob2blob(struct bpf_gen *gen, int off, int size, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_2, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_blob2ctx(struct bpf_gen *gen, int ctx_off, int size, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_1, 0));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
				   bool check_non_zero)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_6, ctx_off));
	if (check_non_zero)
		/* If value in ctx is zero don't update the blob.
		 * For example: when ctx->map.max_entries == 0, keep default max_entries from bpf.c
		 */
		emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_stack2blob(struct bpf_gen *gen, int off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_stack2ctx(struct bpf_gen *gen, int ctx_off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

static void emit_sys_bpf(struct bpf_gen *gen, int cmd, int attr, int attr_size)
{
	emit(gen, BPF_MOV64_IMM(BPF_REG_1, cmd));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, attr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, attr_size));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_bpf));
	/* remember the result in R7 */
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
}

static bool is_simm16(__s64 value)
{
	return value == (__s64)(__s16)value;
}

static void emit_check_err(struct bpf_gen *gen)
{
	__s64 off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;

	/* R7 contains result of last sys_bpf command.
	 * if (R7 < 0) goto cleanup;
	 */
	if (is_simm16(off)) {
		emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, off));
	} else {
		gen->error = -ERANGE;
		emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
	}
}
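
/* Example: if 100 insns were emitted since cleanup_label, the byte delta is
 * 800 and off == -101, so the emitted "if (R7 < 0) goto cleanup" jump lands
 * exactly on the first cleanup insn (pc-relative offsets are counted from
 * the insn following the jump).
 */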

/* reg1 and reg2 should not be R1 - R5. They can be R0, R6 - R10 */
static void emit_debug(struct bpf_gen *gen, int reg1, int reg2,
		       const char *fmt, va_list args)
{
	char buf[1024];
	int addr, len, ret;

	if (!gen->log_level)
		return;
	ret = vsnprintf(buf, sizeof(buf), fmt, args);
	if (ret < 1024 - 7 && reg1 >= 0 && reg2 < 0)
		/* The special case to accommodate common debug_ret():
		 * to avoid specifying BPF_REG_7 and adding " r=%%d" to
		 * prints explicitly.
		 */
		strcat(buf, " r=%d");
	len = strlen(buf) + 1;
	addr = add_data(gen, buf, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, addr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	if (reg1 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_3, reg1));
	if (reg2 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_4, reg2));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_trace_printk));
}

static void debug_regs(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, reg1, reg2, fmt, args);
	va_end(args);
}

static void debug_ret(struct bpf_gen *gen, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, BPF_REG_7, -1, fmt, args);
	va_end(args);
}

static void __emit_sys_close(struct bpf_gen *gen)
{
	emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0,
			      /* 2 is the number of the following insns,
			       * 6 is additional insns in debug_regs
			       */
			      2 + (gen->log_level ? 6 : 0)));
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_1));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	debug_regs(gen, BPF_REG_9, BPF_REG_0, "close(%%d) = %%d");
}

static void emit_sys_close_stack(struct bpf_gen *gen, int stack_off)
{
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, stack_off));
	__emit_sys_close(gen);
}

static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0));
	__emit_sys_close(gen);
}

static void compute_sha_update_offsets(struct bpf_gen *gen);

int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
{
	int i;

	if (nr_progs < gen->nr_progs || nr_maps != gen->nr_maps) {
		pr_warn("nr_progs %d/%d nr_maps %d/%d mismatch\n",
			nr_progs, gen->nr_progs, nr_maps, gen->nr_maps);
		gen->error = -EFAULT;
		return gen->error;
	}
	emit_sys_close_stack(gen, stack_off(btf_fd));
	for (i = 0; i < gen->nr_progs; i++)
		move_stack2ctx(gen,
			       sizeof(struct bpf_loader_ctx) +
			       sizeof(struct bpf_map_desc) * gen->nr_maps +
			       sizeof(struct bpf_prog_desc) * i +
			       offsetof(struct bpf_prog_desc, prog_fd), 4,
			       stack_off(prog_fd[i]));
	for (i = 0; i < gen->nr_maps; i++)
		move_blob2ctx(gen,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * i +
			      offsetof(struct bpf_map_desc, map_fd), 4,
			      blob_fd_array_off(gen, i));
	emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
	emit(gen, BPF_EXIT_INSN());
	if (OPTS_GET(gen->opts, gen_hash, false))
		compute_sha_update_offsets(gen);

	pr_debug("gen: finish %s\n", errstr(gen->error));
	if (!gen->error) {
		struct gen_loader_opts *opts = gen->opts;

		opts->insns = gen->insn_start;
		opts->insns_sz = gen->insn_cur - gen->insn_start;
		opts->data = gen->data_start;
		opts->data_sz = gen->data_cur - gen->data_start;

		/* use target endianness for embedded loader */
		if (gen->swapped_endian) {
			struct bpf_insn *insn = (struct bpf_insn *)opts->insns;
			int insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);

			for (i = 0; i < insn_cnt; i++)
				bpf_insn_bswap(insn++);
		}
	}
	return gen->error;
}

void bpf_gen__free(struct bpf_gen *gen)
{
	if (!gen)
		return;
	free(gen->data_start);
	free(gen->insn_start);
	free(gen);
}

/*
 * Fields of bpf_attr are set to values in native byte-order before being
 * written to the target-bound data blob, and may need endian conversion.
 * This macro allows providing the correct value in situ more simply than
 * writing a separate converter for *all fields* of *all records* included
 * in union bpf_attr. Note that sizeof(rval) should match the assignment
 * target to avoid runtime problems.
 */
#define tgt_endian(rval) ({					\
	typeof(rval) _val = (rval);				\
	if (gen->swapped_endian) {				\
		switch (sizeof(_val)) {				\
		case 1: break;					\
		case 2: _val = bswap_16(_val); break;		\
		case 4: _val = bswap_32(_val); break;		\
		case 8: _val = bswap_64(_val); break;		\
		default: pr_warn("unsupported bswap size!\n");	\
		}						\
	}							\
	_val;							\
})
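
/* Usage sketch: a field is byte-swapped only when the target endianness
 * differs from the host's, with sizeof() of the argument picking the swap
 * width, e.g.
 *
 *	attr.max_entries = tgt_endian(max_entries);
 *
 * becomes bswap_32(max_entries) for a cross-endian target and a plain
 * assignment otherwise; hence casts like tgt_endian((__u32)insn_cnt) below,
 * where the argument's own type would pick the wrong width.
 */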

static void compute_sha_update_offsets(struct bpf_gen *gen)
{
	__u64 sha[SHA256_DWORD_SIZE];
	__u64 sha_dw;
	int i;

	libbpf_sha256(gen->data_start, gen->data_cur - gen->data_start, (__u8 *)sha);
	for (i = 0; i < SHA256_DWORD_SIZE; i++) {
		struct bpf_insn *insn =
			(struct bpf_insn *)(gen->insn_start + gen->hash_insn_offset[i]);
		sha_dw = tgt_endian(sha[i]);
		insn[0].imm = (__u32)sha_dw;
		insn[1].imm = sha_dw >> 32;
	}
}
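
/* Example (for native-endian output): if sha[i] == 0x1122334455667788, the
 * ld_imm64 recorded at hash_insn_offset[i] is patched so that insn[0].imm
 * holds 0x55667788 and insn[1].imm holds 0x11223344, reconstituting the
 * 64-bit value that the emit_signature_match() sequence compares against.
 */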

void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
		       __u32 btf_raw_size)
{
	int attr_size = offsetofend(union bpf_attr, btf_log_level);
	int btf_data, btf_load_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	btf_data = add_data(gen, btf_raw_data, btf_raw_size);

	attr.btf_size = tgt_endian(btf_raw_size);
	btf_load_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: load_btf: off %d size %d, attr: off %d size %d\n",
		 btf_data, btf_raw_size, btf_load_attr, attr_size);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with a pointer to the BTF data */
	emit_rel_store(gen, attr_field(btf_load_attr, btf), btf_data);
	/* emit BTF_LOAD command */
	emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size);
	debug_ret(gen, "btf_load size %d", btf_raw_size);
	emit_check_err(gen);
	/* remember btf_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(btf_fd)));
}

void bpf_gen__map_create(struct bpf_gen *gen,
			 enum bpf_map_type map_type,
			 const char *map_name,
			 __u32 key_size, __u32 value_size, __u32 max_entries,
			 struct bpf_map_create_opts *map_attr, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_extra);
	bool close_inner_map_fd = false;
	int map_create_attr, idx;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	attr.map_type = tgt_endian(map_type);
	attr.key_size = tgt_endian(key_size);
	attr.value_size = tgt_endian(value_size);
	attr.map_flags = tgt_endian(map_attr->map_flags);
	attr.map_extra = tgt_endian(map_attr->map_extra);
	if (map_name)
		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.numa_node = tgt_endian(map_attr->numa_node);
	attr.map_ifindex = tgt_endian(map_attr->map_ifindex);
	attr.max_entries = tgt_endian(max_entries);
	attr.btf_key_type_id = tgt_endian(map_attr->btf_key_type_id);
	attr.btf_value_type_id = tgt_endian(map_attr->btf_value_type_id);

	map_create_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: map_create: %s idx %d type %d value_type_id %d, attr: off %d size %d\n",
		 map_name, map_idx, map_type, map_attr->btf_value_type_id,
		 map_create_attr, attr_size);

	if (map_attr->btf_value_type_id)
		/* populate union bpf_attr with btf_fd saved in the stack earlier */
		move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
				stack_off(btf_fd));
	switch (map_type) {
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
				stack_off(inner_map_fd));
		close_inner_map_fd = true;
		break;
	default:
		break;
	}
	/* conditionally update max_entries */
	if (map_idx >= 0)
		move_ctx2blob(gen, attr_field(map_create_attr, max_entries), 4,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, max_entries),
			      true /* check that max_entries != 0 */);
	/* emit MAP_CREATE command */
	emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
	debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
		  map_name, map_idx, map_type, value_size,
		  map_attr->btf_value_type_id);
	emit_check_err(gen);
	/* remember map_fd in the stack, if successful */
	if (map_idx < 0) {
		/* This bpf_gen__map_create() function is called with map_idx >= 0
		 * for all maps that libbpf loading logic tracks.
		 * It's called with -1 to create an inner map.
		 */
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
				      stack_off(inner_map_fd)));
	} else if (map_idx != gen->nr_maps) {
		gen->error = -EDOM; /* internal bug */
		return;
	} else {
		/* add_map_fd does gen->nr_maps++ */
		idx = add_map_fd(gen);
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, blob_fd_array_off(gen, idx)));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, 0));
	}
	if (close_inner_map_fd)
		emit_sys_close_stack(gen, stack_off(inner_map_fd));
}

static void emit_signature_match(struct bpf_gen *gen)
{
	__s64 off;
	int i;

	for (i = 0; i < SHA256_DWORD_SIZE; i++) {
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX,
						 0, 0, 0, 0));
		emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, i * sizeof(__u64)));
		gen->hash_insn_offset[i] = gen->insn_cur - gen->insn_start;
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_3, 0, 0, 0, 0, 0));

		off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;
		if (is_simm16(off)) {
			emit(gen, BPF_MOV64_IMM(BPF_REG_7, -EINVAL));
			emit(gen, BPF_JMP_REG(BPF_JNE, BPF_REG_2, BPF_REG_3, off));
		} else {
			gen->error = -ERANGE;
			emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
		}
	}
}

void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
				   enum bpf_attach_type type)
{
	const char *prefix;
	int kind, ret;

	btf_get_kernel_prefix_kind(type, &prefix, &kind);
	gen->attach_kind = kind;
	ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
		       prefix, attach_name);
	if (ret >= sizeof(gen->attach_target))
		gen->error = -ENOSPC;
}
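
/* Example (assuming the usual libbpf prefix mapping): for BPF_TRACE_RAW_TP
 * with attach_name "sched_switch", btf_get_kernel_prefix_kind() is expected
 * to return the "btf_trace_" prefix, making attach_target
 * "btf_trace_sched_switch" with attach_kind set to the matching BTF kind.
 */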

static void emit_find_attach_target(struct bpf_gen *gen)
{
	int name, len = strlen(gen->attach_target) + 1;

	pr_debug("gen: find_attach_tgt %s %d\n", gen->attach_target, gen->attach_kind);
	name = add_data(gen, gen->attach_target, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, gen->attach_kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)",
		  gen->attach_target, gen->attach_kind);
	emit_check_err(gen);
	/* if successful, btf_id is in lower 32-bit of R7 and
	 * btf_obj_fd is in upper 32-bit
	 */
}

void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
			    bool is_typeless, bool is_ld64, int kind, int insn_idx)
{
	struct ksym_relo_desc *relo;

	relo = libbpf_reallocarray(gen->relos, gen->relo_cnt + 1, sizeof(*relo));
	if (!relo) {
		gen->error = -ENOMEM;
		return;
	}
	gen->relos = relo;
	relo += gen->relo_cnt;
	relo->name = name;
	relo->is_weak = is_weak;
	relo->is_typeless = is_typeless;
	relo->is_ld64 = is_ld64;
	relo->kind = kind;
	relo->insn_idx = insn_idx;
	gen->relo_cnt++;
}

/* returns existing ksym_desc with ref incremented, or inserts a new one */
static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	struct ksym_desc *kdesc;
	int i;

	for (i = 0; i < gen->nr_ksyms; i++) {
		kdesc = &gen->ksyms[i];
		if (kdesc->kind == relo->kind && kdesc->is_ld64 == relo->is_ld64 &&
		    !strcmp(kdesc->name, relo->name)) {
			kdesc->ref++;
			return kdesc;
		}
	}
	kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc));
	if (!kdesc) {
		gen->error = -ENOMEM;
		return NULL;
	}
	gen->ksyms = kdesc;
	kdesc = &gen->ksyms[gen->nr_ksyms++];
	kdesc->name = relo->name;
	kdesc->kind = relo->kind;
	kdesc->ref = 1;
	kdesc->off = 0;
	kdesc->insn = 0;
	kdesc->is_ld64 = relo->is_ld64;
	return kdesc;
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 */
static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1;

	name_off = add_data(gen, relo->name, len);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 * Returns u64 symbol addr in BPF_REG_9
 */
static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1, res_off;

	name_off = add_data(gen, relo->name, len);
	res_off = add_data(gen, NULL, 8); /* res is u64 */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, res_off));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name));
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind);
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 *
 * We need to reuse the BTF fd for the same symbol, otherwise each relocation
 * takes a new index while the kernel limits total kfunc BTFs to 256. For
 * duplicate symbols, that would mean a new BTF fd index for each entry. By
 * pairing the symbol name with the index, we get the insn->imm, insn->off
 * pairing that the kernel uses for kfunc_tab, which becomes the effective
 * limit even though all of them may share the same index in fd_array (such
 * that kfunc_btf_tab has 1 element).
 */
static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	int btf_fd_idx;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing bpf_insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, off), 2,
			       kdesc->insn + offsetof(struct bpf_insn, off));
		goto log;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* get index in fd_array to store BTF FD at */
	btf_fd_idx = add_kfunc_btf_fd(gen);
	if (btf_fd_idx > INT16_MAX) {
		pr_warn("BTF fd off %d for kfunc %s exceeds INT16_MAX, cannot process relocation\n",
			btf_fd_idx, relo->name);
		gen->error = -E2BIG;
		return;
	}
	kdesc->off = btf_fd_idx;
	/* jump to success case */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
	/* set value for imm, off as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip success case for ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 10));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* obtain fd in BPF_REG_9 */
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7));
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	/* load fd_array slot pointer */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, btf_fd_idx)));
	/* store BTF fd in slot, 0 for vmlinux */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0));
	/* jump to insn[insn_idx].off store if fd denotes module BTF */
	emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2));
	/* set the default value for off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip BTF fd store for vmlinux BTF */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1));
	/* store index into insn[insn_idx].off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx));
log:
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8,
			      offsetof(struct bpf_insn, off)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " func (%s:count=%d): imm: %%d, off: %%d",
		   relo->name, kdesc->ref);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, kdesc->off)));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_0, 0));
	debug_regs(gen, BPF_REG_9, -1, " func (%s:count=%d): btf_fd",
		   relo->name, kdesc->ref);
}

static void emit_ksym_relo_log(struct bpf_gen *gen, struct ksym_relo_desc *relo,
			       int ref)
{
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
			      offsetof(struct bpf_insn, imm)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_typeless(struct bpf_gen *gen,
				    struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		goto log;
	}
	/* remember insn offset, so we can copy ksym addr later */
	kdesc->insn = insn;
	/* skip typeless ksym_desc in fd closing loop in cleanup_relos */
	kdesc->typeless = true;
	emit_bpf_kallsyms_lookup_name(gen, relo);
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1));
	emit_check_err(gen);
	/* store lower half of addr into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm)));
	/* store upper half of addr into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9,
		      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
log:
	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

static __u32 src_reg_mask(struct bpf_gen *gen)
{
#if defined(__LITTLE_ENDIAN_BITFIELD) /* src_reg,dst_reg,... */
	return gen->swapped_endian ? 0xf0 : 0x0f;
#elif defined(__BIG_ENDIAN_BITFIELD) /* dst_reg,src_reg,... */
	return gen->swapped_endian ? 0x0f : 0xf0;
#else
#error "Unsupported bit endianness, cannot proceed"
#endif
}
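
/* Example: on a little-endian host emitting same-endian insns the mask is
 * 0x0f, so the "AND 0x0f" in emit_relo_ksym_btf() below keeps dst_reg in
 * the low nibble and clears src_reg (i.e. the BPF_PSEUDO_BTF_ID marker) in
 * the high nibble of the insn's second byte.
 */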

/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	__u32 reg_mask;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		/* jump over src_reg adjustment if imm (btf_id) is not 0, reuse BPF_REG_0 from move_blob2blob
		 * If btf_id is zero, clear BPF_PSEUDO_BTF_ID flag in src_reg of ld_imm64 insn
		 */
		emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3));
		goto clear_src_reg;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* jump to success case */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
	/* set values for insn[insn_idx].imm, insn[insn_idx + 1].imm as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0));
	/* skip success case for ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* store btf_obj_fd into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
	/* skip src_reg adjustment */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3));
clear_src_reg:
	/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
	reg_mask = src_reg_mask(gen);
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
	emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));

	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

void bpf_gen__record_relo_core(struct bpf_gen *gen,
			       const struct bpf_core_relo *core_relo)
{
	struct bpf_core_relo *relos;

	relos = libbpf_reallocarray(gen->core_relos, gen->core_relo_cnt + 1, sizeof(*relos));
	if (!relos) {
		gen->error = -ENOMEM;
		return;
	}
	gen->core_relos = relos;
	relos += gen->core_relo_cnt;
	memcpy(relos, core_relo, sizeof(*relos));
	gen->core_relo_cnt++;
}

static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
{
	int insn;

	pr_debug("gen: emit_relo (%d): %s at %d %s\n",
		 relo->kind, relo->name, relo->insn_idx, relo->is_ld64 ? "ld64" : "call");
	insn = insns + sizeof(struct bpf_insn) * relo->insn_idx;
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
	if (relo->is_ld64) {
		if (relo->is_typeless)
			emit_relo_ksym_typeless(gen, relo, insn);
		else
			emit_relo_ksym_btf(gen, relo, insn);
	} else {
		emit_relo_kfunc_btf(gen, relo, insn);
	}
}

static void emit_relos(struct bpf_gen *gen, int insns)
{
	int i;

	for (i = 0; i < gen->relo_cnt; i++)
		emit_relo(gen, gen->relos + i, insns);
}

static void cleanup_core_relo(struct bpf_gen *gen)
{
	if (!gen->core_relo_cnt)
		return;
	free(gen->core_relos);
	gen->core_relo_cnt = 0;
	gen->core_relos = NULL;
}

static void cleanup_relos(struct bpf_gen *gen, int insns)
{
	struct ksym_desc *kdesc;
	int i, insn;

	for (i = 0; i < gen->nr_ksyms; i++) {
		kdesc = &gen->ksyms[i];
		/* only close fds for typed ksyms and kfuncs */
		if (kdesc->is_ld64 && !kdesc->typeless) {
			/* close fd recorded in insn[insn_idx + 1].imm */
			insn = kdesc->insn;
			insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
			emit_sys_close_blob(gen, insn);
		} else if (!kdesc->is_ld64) {
			emit_sys_close_blob(gen, blob_fd_array_off(gen, kdesc->off));
			if (kdesc->off < MAX_FD_ARRAY_SZ)
				gen->nr_fd_array--;
		}
	}
	if (gen->nr_ksyms) {
		free(gen->ksyms);
		gen->nr_ksyms = 0;
		gen->ksyms = NULL;
	}
	if (gen->relo_cnt) {
		free(gen->relos);
		gen->relo_cnt = 0;
		gen->relos = NULL;
	}
	cleanup_core_relo(gen);
}

/* Convert func, line, and core relo info blobs to target endianness */
static void info_blob_bswap(struct bpf_gen *gen, int func_info, int line_info,
			    int core_relos, struct bpf_prog_load_opts *load_attr)
{
	struct bpf_func_info *fi = gen->data_start + func_info;
	struct bpf_line_info *li = gen->data_start + line_info;
	struct bpf_core_relo *cr = gen->data_start + core_relos;
	int i;

	for (i = 0; i < load_attr->func_info_cnt; i++)
		bpf_func_info_bswap(fi++);

	for (i = 0; i < load_attr->line_info_cnt; i++)
		bpf_line_info_bswap(li++);

	for (i = 0; i < gen->core_relo_cnt; i++)
		bpf_core_relo_bswap(cr++);
}

void bpf_gen__prog_load(struct bpf_gen *gen,
			enum bpf_prog_type prog_type, const char *prog_name,
			const char *license, struct bpf_insn *insns, size_t insn_cnt,
			struct bpf_prog_load_opts *load_attr, int prog_idx)
{
	int func_info_tot_sz = load_attr->func_info_cnt *
			       load_attr->func_info_rec_size;
	int line_info_tot_sz = load_attr->line_info_cnt *
			       load_attr->line_info_rec_size;
	int core_relo_tot_sz = gen->core_relo_cnt *
			       sizeof(struct bpf_core_relo);
	int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos;
	int attr_size = offsetofend(union bpf_attr, core_relo_rec_size);
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	/* add license string to blob of bytes */
	license_off = add_data(gen, license, strlen(license) + 1);
	/* add insns to blob of bytes */
	insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));
	pr_debug("gen: prog_load: prog_idx %d type %d insn off %d insns_cnt %zd license off %d\n",
		 prog_idx, prog_type, insns_off, insn_cnt, license_off);

	/* convert blob insns to target endianness */
	if (gen->swapped_endian) {
		struct bpf_insn *insn = gen->data_start + insns_off;
		int i;

		for (i = 0; i < insn_cnt; i++, insn++)
			bpf_insn_bswap(insn);
	}

	attr.prog_type = tgt_endian(prog_type);
	attr.expected_attach_type = tgt_endian(load_attr->expected_attach_type);
	attr.attach_btf_id = tgt_endian(load_attr->attach_btf_id);
	attr.prog_ifindex = tgt_endian(load_attr->prog_ifindex);
	attr.kern_version = 0;
	attr.insn_cnt = tgt_endian((__u32)insn_cnt);
	attr.prog_flags = tgt_endian(load_attr->prog_flags);

	attr.func_info_rec_size = tgt_endian(load_attr->func_info_rec_size);
	attr.func_info_cnt = tgt_endian(load_attr->func_info_cnt);
	func_info = add_data(gen, load_attr->func_info, func_info_tot_sz);
	pr_debug("gen: prog_load: func_info: off %d cnt %d rec size %d\n",
		 func_info, load_attr->func_info_cnt,
		 load_attr->func_info_rec_size);

	attr.line_info_rec_size = tgt_endian(load_attr->line_info_rec_size);
	attr.line_info_cnt = tgt_endian(load_attr->line_info_cnt);
	line_info = add_data(gen, load_attr->line_info, line_info_tot_sz);
	pr_debug("gen: prog_load: line_info: off %d cnt %d rec size %d\n",
		 line_info, load_attr->line_info_cnt,
		 load_attr->line_info_rec_size);

	attr.core_relo_rec_size = tgt_endian((__u32)sizeof(struct bpf_core_relo));
	attr.core_relo_cnt = tgt_endian(gen->core_relo_cnt);
	core_relos = add_data(gen, gen->core_relos, core_relo_tot_sz);
	pr_debug("gen: prog_load: core_relos: off %d cnt %d rec size %zd\n",
		 core_relos, gen->core_relo_cnt,
		 sizeof(struct bpf_core_relo));

	/* convert all info blobs to target endianness */
	if (gen->swapped_endian)
		info_blob_bswap(gen, func_info, line_info, core_relos, load_attr);

	libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	prog_load_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: prog_load: attr: off %d size %d\n",
		 prog_load_attr, attr_size);

	/* populate union bpf_attr with a pointer to license */
	emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);

	/* populate union bpf_attr with a pointer to instructions */
	emit_rel_store(gen, attr_field(prog_load_attr, insns), insns_off);

	/* populate union bpf_attr with a pointer to func_info */
	emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);

	/* populate union bpf_attr with a pointer to line_info */
	emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);

	/* populate union bpf_attr with a pointer to core_relos */
	emit_rel_store(gen, attr_field(prog_load_attr, core_relos), core_relos);

	/* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */
	emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with btf_fd saved in the stack earlier */
	move_stack2blob(gen, attr_field(prog_load_attr, prog_btf_fd), 4,
			stack_off(btf_fd));
	if (gen->attach_kind) {
		emit_find_attach_target(gen);
		/* populate union bpf_attr with btf_id and btf_obj_fd found by helper */
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, prog_load_attr));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_id)));
		emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_obj_fd)));
	}
	emit_relos(gen, insns_off);
	/* emit PROG_LOAD command */
	emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
	debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
	/* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */
	cleanup_relos(gen, insns_off);
	if (gen->attach_kind) {
		emit_sys_close_blob(gen,
				    attr_field(prog_load_attr, attach_btf_obj_fd));
		gen->attach_kind = 0;
	}
	emit_check_err(gen);
	/* remember prog_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
			      stack_off(prog_fd[gen->nr_progs])));
	gen->nr_progs++;
}

void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
			      __u32 value_size)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, value, key;
	union bpf_attr attr;
	int zero = 0;

	memset(&attr, 0, attr_size);

	value = add_data(gen, pvalue, value_size);
	key = add_data(gen, &zero, sizeof(zero));

	/* if (map_desc[map_idx].initial_value) {
	 *    if (ctx->flags & BPF_SKEL_KERNEL)
	 *        bpf_probe_read_kernel(value, value_size, initial_value);
	 *    else
	 *        bpf_copy_from_user(value, value_size, initial_value);
	 * }
	 */
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, initial_value)));
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 8));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, value));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
			      offsetof(struct bpf_loader_ctx, flags)));
	emit(gen, BPF_JMP_IMM(BPF_JSET, BPF_REG_0, BPF_SKEL_KERNEL, 2));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	map_update_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: map_update_elem: idx %d, value: off %d size %d, attr: off %d size %d\n",
		 map_idx, value, value_size, map_update_attr, attr_size);
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value), value);
	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "update_elem idx %d value_size %d", map_idx, value_size);
	emit_check_err(gen);
}

void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slot,
				 int inner_map_idx)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, key;
	union bpf_attr attr;
	int tgt_slot;

	memset(&attr, 0, attr_size);

	tgt_slot = tgt_endian(slot);
	key = add_data(gen, &tgt_slot, sizeof(tgt_slot));

	map_update_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: populate_outer_map: outer %d key %d inner %d, attr: off %d size %d\n",
		 outer_map_idx, slot, inner_map_idx, map_update_attr, attr_size);
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
		       blob_fd_array_off(gen, outer_map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value),
		       blob_fd_array_off(gen, inner_map_idx));

	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "populate_outer_map outer %d key %d inner %d",
		  outer_map_idx, slot, inner_map_idx);
	emit_check_err(gen);
}

void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_fd);
	int map_freeze_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	map_freeze_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: map_freeze: idx %d, attr: off %d size %d\n",
		 map_idx, map_freeze_attr, attr_size);
	move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	/* emit MAP_FREEZE command */
	emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
	debug_ret(gen, "map_freeze");
	emit_check_err(gen);
}
1255