// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not,  see <http://www.gnu.org/licenses>
 */

#include <stdlib.h>
#include <string.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <limits.h>
#include <sys/resource.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

/*
 * When building perf, unistd.h is overridden, so __NR_bpf must be
 * defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# elif defined(__mips__) && defined(_ABIO32)
#  define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
#  define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
#  define __NR_bpf 5315
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
			     unsigned int size)
{
	int fd;

	fd = sys_bpf(cmd, attr, size);
	return ensure_good_fd(fd);
}
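
/* Every bpf(2) command funnels through sys_bpf(); sys_bpf_fd() additionally
 * runs fd-returning commands through ensure_good_fd() (a libbpf-internal
 * helper declared in libbpf_internal.h). The idea is to keep library-owned
 * fds out of the 0-2 range so they never alias stdin/stdout/stderr. A
 * minimal sketch of what such a helper might look like, assuming only
 * fcntl(F_DUPFD_CLOEXEC) (illustrative, not the actual implementation):
 *
 *	static int ensure_good_fd(int fd)
 *	{
 *		int old_fd = fd, saved_errno;
 *
 *		if (fd < 0)
 *			return fd;
 *		if (fd < 3) {
 *			fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
 *			saved_errno = errno;
 *			close(old_fd);
 *			errno = saved_errno;
 *		}
 *		return fd;
 *	}
 */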

int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
{
	int fd;

	do {
		fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
	} while (fd < 0 && errno == EAGAIN && --attempts > 0);

	return fd;
}

/* Probe whether the kernel switched from memlock-based (RLIMIT_MEMLOCK) to
 * memcg-based memory accounting for BPF maps and progs. This was done in [0].
 * Support for the bpf_ktime_get_coarse_ns() helper, which was added in the
 * same 5.11 Linux release ([1]), is used as a proxy to detect memcg-based
 * accounting for BPF.
 *
 *   [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
 *   [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
 */
int probe_memcg_account(int token_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
	struct bpf_insn insns[] = {
		BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
		BPF_EXIT_INSN(),
	};
	size_t insn_cnt = ARRAY_SIZE(insns);
	union bpf_attr attr;
	int prog_fd;

	/* attempt loading a program that calls bpf_ktime_get_coarse_ns() */
	memset(&attr, 0, attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = insn_cnt;
	attr.license = ptr_to_u64("GPL");
	attr.prog_token_fd = token_fd;

	prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz);
	if (prog_fd >= 0) {
		close(prog_fd);
		return 1;
	}
	return 0;
}

static bool memlock_bumped;
static rlim_t memlock_rlim = RLIM_INFINITY;

int libbpf_set_memlock_rlim(size_t memlock_bytes)
{
	if (memlock_bumped)
		return libbpf_err(-EBUSY);

	memlock_rlim = memlock_bytes;
	return 0;
}

int bump_rlimit_memlock(void)
{
	struct rlimit rlim;

	/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
	if (memlock_bumped || feat_supported(NULL, FEAT_MEMCG_ACCOUNT))
		return 0;

	memlock_bumped = true;

	/* zero memlock_rlim disables auto-bumping RLIMIT_MEMLOCK */
	if (memlock_rlim == 0)
		return 0;

	rlim.rlim_cur = rlim.rlim_max = memlock_rlim;
	if (setrlimit(RLIMIT_MEMLOCK, &rlim))
		return -errno;

	return 0;
}
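
/* Callers that want to control the pre-5.11 RLIMIT_MEMLOCK behavior can do
 * so explicitly. For example (illustrative application code, not part of
 * this file), disabling the automatic bump entirely:
 *
 *	libbpf_set_memlock_rlim(0);
 *
 * or capping it at a specific byte budget before the first map/prog load:
 *
 *	libbpf_set_memlock_rlim(64 * 1024 * 1024);
 *
 * Once the first load has triggered bump_rlimit_memlock(), further calls
 * fail with -EBUSY, as enforced above.
 */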

int bpf_map_create(enum bpf_map_type map_type,
		   const char *map_name,
		   __u32 key_size,
		   __u32 value_size,
		   __u32 max_entries,
		   const struct bpf_map_create_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_token_fd);
	union bpf_attr attr;
	int fd;

	bump_rlimit_memlock();

	memset(&attr, 0, attr_sz);

	if (!OPTS_VALID(opts, bpf_map_create_opts))
		return libbpf_err(-EINVAL);

	attr.map_type = map_type;
	if (map_name && feat_supported(NULL, FEAT_PROG_NAME))
		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;

	attr.btf_fd = OPTS_GET(opts, btf_fd, 0);
	attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
	attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
	attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);

	attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
	attr.map_flags = OPTS_GET(opts, map_flags, 0);
	attr.map_extra = OPTS_GET(opts, map_extra, 0);
	attr.numa_node = OPTS_GET(opts, numa_node, 0);
	attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);

	attr.map_token_fd = OPTS_GET(opts, token_fd, 0);

	fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
}
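
/* Illustrative usage (hypothetical caller, not part of this file): creating
 * a small ARRAY map with default options, where a negative return value
 * encodes the error (see libbpf_err_errno()):
 *
 *	int map_fd;
 *
 *	map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "vals",
 *				sizeof(__u32), sizeof(__u64), 16, NULL);
 *	if (map_fd < 0)
 *		return map_fd;
 */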

static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
			__u32 actual_rec_size, __u32 expected_rec_size)
{
	__u64 info_len = (__u64)actual_rec_size * cnt;
	void *info, *nrecord;
	int i;

	info = malloc(info_len);
	if (!info)
		return NULL;

	/* zero out bytes the kernel does not understand */
	nrecord = info;
	for (i = 0; i < cnt; i++) {
		memcpy(nrecord, orecord, expected_rec_size);
		memset(nrecord + expected_rec_size, 0,
		       actual_rec_size - expected_rec_size);
		orecord += actual_rec_size;
		nrecord += actual_rec_size;
	}

	return info;
}
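
/* Worked example of the copy above (hypothetical sizes): if user space hands
 * in 16-byte func_info records but the kernel only understands 8-byte ones
 * (actual_rec_size == 16, expected_rec_size == 8), each rebuilt record keeps
 * its first 8 bytes verbatim and has the remaining 8 forced to zero. The
 * record stride stays 16, and the kernel tolerates larger records as long
 * as the bytes it does not understand are all zero, which is what makes the
 * E2BIG retry in bpf_prog_load() below work.
 */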

int bpf_prog_load(enum bpf_prog_type prog_type,
		  const char *prog_name, const char *license,
		  const struct bpf_insn *insns, size_t insn_cnt,
		  struct bpf_prog_load_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
	void *finfo = NULL, *linfo = NULL;
	const char *func_info, *line_info;
	__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
	__u32 func_info_rec_size, line_info_rec_size;
	int fd, attempts;
	union bpf_attr attr;
	char *log_buf;

	bump_rlimit_memlock();

	if (!OPTS_VALID(opts, bpf_prog_load_opts))
		return libbpf_err(-EINVAL);

	attempts = OPTS_GET(opts, attempts, 0);
	if (attempts < 0)
		return libbpf_err(-EINVAL);
	if (attempts == 0)
		attempts = PROG_LOAD_ATTEMPTS;

	memset(&attr, 0, attr_sz);

	attr.prog_type = prog_type;
	attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);

	attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
	attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
	attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
	attr.kern_version = OPTS_GET(opts, kern_version, 0);
	attr.prog_token_fd = OPTS_GET(opts, token_fd, 0);

	if (prog_name && feat_supported(NULL, FEAT_PROG_NAME))
		libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	attr.license = ptr_to_u64(license);

	if (insn_cnt > UINT_MAX)
		return libbpf_err(-E2BIG);

	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)insn_cnt;

	attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
	attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);

	if (attach_prog_fd && attach_btf_obj_fd)
		return libbpf_err(-EINVAL);

	attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
	if (attach_prog_fd)
		attr.attach_prog_fd = attach_prog_fd;
	else
		attr.attach_btf_obj_fd = attach_btf_obj_fd;

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	if (!!log_buf != !!log_size)
		return libbpf_err(-EINVAL);

	func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
	func_info = OPTS_GET(opts, func_info, NULL);
	attr.func_info_rec_size = func_info_rec_size;
	attr.func_info = ptr_to_u64(func_info);
	attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0);

	line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
	line_info = OPTS_GET(opts, line_info, NULL);
	attr.line_info_rec_size = line_info_rec_size;
	attr.line_info = ptr_to_u64(line_info);
	attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);

	attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));

	if (log_level) {
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = log_level;
	}

	fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
	OPTS_SET(opts, log_true_size, attr.log_true_size);
	if (fd >= 0)
		return fd;
	/* After bpf_prog_load(), the kernel may modify certain attributes
	 * to give user space a hint about how to deal with a loading failure.
	 * Check to see whether we can make some changes and load again.
	 */
	while (errno == E2BIG && (!finfo || !linfo)) {
		if (!finfo && attr.func_info_cnt &&
		    attr.func_info_rec_size < func_info_rec_size) {
			/* try with corrected func info records */
			finfo = alloc_zero_tailing_info(func_info,
							attr.func_info_cnt,
							func_info_rec_size,
							attr.func_info_rec_size);
			if (!finfo) {
				errno = E2BIG;
				goto done;
			}

			attr.func_info = ptr_to_u64(finfo);
			attr.func_info_rec_size = func_info_rec_size;
		} else if (!linfo && attr.line_info_cnt &&
			   attr.line_info_rec_size < line_info_rec_size) {
			linfo = alloc_zero_tailing_info(line_info,
							attr.line_info_cnt,
							line_info_rec_size,
							attr.line_info_rec_size);
			if (!linfo) {
				errno = E2BIG;
				goto done;
			}

			attr.line_info = ptr_to_u64(linfo);
			attr.line_info_rec_size = line_info_rec_size;
		} else {
			break;
		}

		fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
		OPTS_SET(opts, log_true_size, attr.log_true_size);
		if (fd >= 0)
			goto done;
	}

	if (log_level == 0 && log_buf) {
		/* log_level == 0 with non-NULL log_buf requires retrying on error
		 * with log_level == 1 and log_buf/log_size set, to get details of
		 * the failure
		 */
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = 1;

		fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
		OPTS_SET(opts, log_true_size, attr.log_true_size);
	}
done:
	/* free() doesn't affect errno, so we don't need to restore it */
	free(finfo);
	free(linfo);
	return libbpf_err_errno(fd);
}
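
/* Illustrative usage (hypothetical caller, not part of this file): loading
 * a trivial "return 0" socket filter, mirroring the insns pattern used by
 * probe_memcg_account() above; a negative return encodes the error:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	int prog_fd;
 *
 *	prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "noop", "GPL",
 *				insns, ARRAY_SIZE(insns), NULL);
 *	if (prog_fd < 0)
 *		return prog_fd;
 */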

int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_delete_elem(int fd, const void *key)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	const size_t attr_sz = offsetofend(union bpf_attr, next_key);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, attr_sz);
	return libbpf_err_errno(ret);
}
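
/* Illustrative iteration pattern (hypothetical caller, __u32 keys assumed):
 * passing a NULL key fetches the first key, and -ENOENT signals that all
 * keys have been visited:
 *
 *	__u32 key, next_key;
 *	void *cur = NULL;
 *	int err;
 *
 *	while ((err = bpf_map_get_next_key(map_fd, cur, &next_key)) == 0) {
 *		key = next_key;
 *		cur = &key;
 *		// look up / process `key` here
 *	}
 *	// err is -ENOENT once iteration is complete
 */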

int bpf_map_freeze(int fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_fd);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;

	ret = sys_bpf(BPF_MAP_FREEZE, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

static int bpf_map_batch_common(int cmd, int fd, void  *in_batch,
				void *out_batch, void *keys, void *values,
				__u32 *count,
				const struct bpf_map_batch_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, batch);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_map_batch_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.batch.map_fd = fd;
	attr.batch.in_batch = ptr_to_u64(in_batch);
	attr.batch.out_batch = ptr_to_u64(out_batch);
	attr.batch.keys = ptr_to_u64(keys);
	attr.batch.values = ptr_to_u64(values);
	attr.batch.count = *count;
	attr.batch.elem_flags  = OPTS_GET(opts, elem_flags, 0);
	attr.batch.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(cmd, &attr, attr_sz);
	*count = attr.batch.count;

	return libbpf_err_errno(ret);
}

int bpf_map_delete_batch(int fd, const void *keys, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
				    NULL, (void *)keys, NULL, count, opts);
}

int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
			 void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
				    out_batch, keys, values, count, opts);
}

int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
				    void *keys, void *values, __u32 *count,
				    const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
				    fd, in_batch, out_batch, keys, values,
				    count, opts);
}

int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
				    (void *)keys, (void *)values, count, opts);
}
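
/* Illustrative batch lookup loop (hypothetical caller; __u32 keys, __u64
 * values, and a __u32 batch token are assumptions for this sketch): `count`
 * is in/out, the out_batch of one call feeds the in_batch of the next, and
 * -ENOENT marks the final, possibly partial, batch:
 *
 *	__u32 batch, next_batch, count;
 *	__u32 keys[64];
 *	__u64 vals[64];
 *	bool first = true;
 *	int err;
 *
 *	do {
 *		count = 64;
 *		err = bpf_map_lookup_batch(map_fd,
 *					   first ? NULL : &batch, &next_batch,
 *					   keys, vals, &count, NULL);
 *		if (err && err != -ENOENT)
 *			return err;
 *		// process `count` returned elements here
 *		batch = next_batch;
 *		first = false;
 *	} while (!err);
 */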

int bpf_obj_pin_opts(int fd, const char *pathname, const struct bpf_obj_pin_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, path_fd);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_obj_pin_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.path_fd = OPTS_GET(opts, path_fd, 0);
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.file_flags = OPTS_GET(opts, file_flags, 0);
	attr.bpf_fd = fd;

	ret = sys_bpf(BPF_OBJ_PIN, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_obj_pin(int fd, const char *pathname)
{
	return bpf_obj_pin_opts(fd, pathname, NULL);
}

int bpf_obj_get(const char *pathname)
{
	return bpf_obj_get_opts(pathname, NULL);
}

int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, path_fd);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_obj_get_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.path_fd = OPTS_GET(opts, path_fd, 0);
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.file_flags = OPTS_GET(opts, file_flags, 0);

	fd = sys_bpf_fd(BPF_OBJ_GET, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
		.flags = flags,
	);

	return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts);
}

int bpf_prog_attach_opts(int prog_fd, int target, enum bpf_attach_type type,
			 const struct bpf_prog_attach_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, expected_revision);
	__u32 relative_id, flags;
	int ret, relative_fd;
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_prog_attach_opts))
		return libbpf_err(-EINVAL);

	relative_id = OPTS_GET(opts, relative_id, 0);
	relative_fd = OPTS_GET(opts, relative_fd, 0);
	flags = OPTS_GET(opts, flags, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (relative_fd && relative_id)
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.target_fd		= target;
	attr.attach_bpf_fd	= prog_fd;
	attr.attach_type	= type;
	attr.replace_bpf_fd	= OPTS_GET(opts, replace_fd, 0);
	attr.expected_revision	= OPTS_GET(opts, expected_revision, 0);

	if (relative_id) {
		attr.attach_flags = flags | BPF_F_ID;
		attr.relative_id  = relative_id;
	} else {
		attr.attach_flags = flags;
		attr.relative_fd  = relative_fd;
	}

	ret = sys_bpf(BPF_PROG_ATTACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_prog_detach_opts(int prog_fd, int target, enum bpf_attach_type type,
			 const struct bpf_prog_detach_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, expected_revision);
	__u32 relative_id, flags;
	int ret, relative_fd;
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_prog_detach_opts))
		return libbpf_err(-EINVAL);

	relative_id = OPTS_GET(opts, relative_id, 0);
	relative_fd = OPTS_GET(opts, relative_fd, 0);
	flags = OPTS_GET(opts, flags, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (relative_fd && relative_id)
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.target_fd		= target;
	attr.attach_bpf_fd	= prog_fd;
	attr.attach_type	= type;
	attr.expected_revision	= OPTS_GET(opts, expected_revision, 0);

	if (relative_id) {
		attr.attach_flags = flags | BPF_F_ID;
		attr.relative_id  = relative_id;
	} else {
		attr.attach_flags = flags;
		attr.relative_fd  = relative_fd;
	}

	ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	return bpf_prog_detach_opts(0, target_fd, type, NULL);
}

int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	return bpf_prog_detach_opts(prog_fd, target_fd, type, NULL);
}

int bpf_link_create(int prog_fd, int target_fd,
		    enum bpf_attach_type attach_type,
		    const struct bpf_link_create_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_create);
	__u32 target_btf_id, iter_info_len, relative_id;
	int fd, err, relative_fd;
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_link_create_opts))
		return libbpf_err(-EINVAL);

	iter_info_len = OPTS_GET(opts, iter_info_len, 0);
	target_btf_id = OPTS_GET(opts, target_btf_id, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (iter_info_len || target_btf_id) {
		if (iter_info_len && target_btf_id)
			return libbpf_err(-EINVAL);
		if (!OPTS_ZEROED(opts, target_btf_id))
			return libbpf_err(-EINVAL);
	}

	memset(&attr, 0, attr_sz);
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;
	attr.link_create.flags = OPTS_GET(opts, flags, 0);

	if (target_btf_id) {
		attr.link_create.target_btf_id = target_btf_id;
		goto proceed;
	}

	switch (attach_type) {
	case BPF_TRACE_ITER:
		attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
		attr.link_create.iter_info_len = iter_info_len;
		break;
	case BPF_PERF_EVENT:
		attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0);
		if (!OPTS_ZEROED(opts, perf_event))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_KPROBE_MULTI:
		attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0);
		attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0);
		attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0));
		attr.link_create.kprobe_multi.addrs = ptr_to_u64(OPTS_GET(opts, kprobe_multi.addrs, 0));
		attr.link_create.kprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, kprobe_multi.cookies, 0));
		if (!OPTS_ZEROED(opts, kprobe_multi))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_UPROBE_MULTI:
		attr.link_create.uprobe_multi.flags = OPTS_GET(opts, uprobe_multi.flags, 0);
		attr.link_create.uprobe_multi.cnt = OPTS_GET(opts, uprobe_multi.cnt, 0);
		attr.link_create.uprobe_multi.path = ptr_to_u64(OPTS_GET(opts, uprobe_multi.path, 0));
		attr.link_create.uprobe_multi.offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.offsets, 0));
		attr.link_create.uprobe_multi.ref_ctr_offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.ref_ctr_offsets, 0));
		attr.link_create.uprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, uprobe_multi.cookies, 0));
		attr.link_create.uprobe_multi.pid = OPTS_GET(opts, uprobe_multi.pid, 0);
		if (!OPTS_ZEROED(opts, uprobe_multi))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
	case BPF_LSM_MAC:
		attr.link_create.tracing.cookie = OPTS_GET(opts, tracing.cookie, 0);
		if (!OPTS_ZEROED(opts, tracing))
			return libbpf_err(-EINVAL);
		break;
	case BPF_NETFILTER:
		attr.link_create.netfilter.pf = OPTS_GET(opts, netfilter.pf, 0);
		attr.link_create.netfilter.hooknum = OPTS_GET(opts, netfilter.hooknum, 0);
		attr.link_create.netfilter.priority = OPTS_GET(opts, netfilter.priority, 0);
		attr.link_create.netfilter.flags = OPTS_GET(opts, netfilter.flags, 0);
		if (!OPTS_ZEROED(opts, netfilter))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TCX_INGRESS:
	case BPF_TCX_EGRESS:
		relative_fd = OPTS_GET(opts, tcx.relative_fd, 0);
		relative_id = OPTS_GET(opts, tcx.relative_id, 0);
		if (relative_fd && relative_id)
			return libbpf_err(-EINVAL);
		if (relative_id) {
			attr.link_create.tcx.relative_id = relative_id;
			attr.link_create.flags |= BPF_F_ID;
		} else {
			attr.link_create.tcx.relative_fd = relative_fd;
		}
		attr.link_create.tcx.expected_revision = OPTS_GET(opts, tcx.expected_revision, 0);
		if (!OPTS_ZEROED(opts, tcx))
			return libbpf_err(-EINVAL);
		break;
	case BPF_NETKIT_PRIMARY:
	case BPF_NETKIT_PEER:
		relative_fd = OPTS_GET(opts, netkit.relative_fd, 0);
		relative_id = OPTS_GET(opts, netkit.relative_id, 0);
		if (relative_fd && relative_id)
			return libbpf_err(-EINVAL);
		if (relative_id) {
			attr.link_create.netkit.relative_id = relative_id;
			attr.link_create.flags |= BPF_F_ID;
		} else {
			attr.link_create.netkit.relative_fd = relative_fd;
		}
		attr.link_create.netkit.expected_revision = OPTS_GET(opts, netkit.expected_revision, 0);
		if (!OPTS_ZEROED(opts, netkit))
			return libbpf_err(-EINVAL);
		break;
	default:
		if (!OPTS_ZEROED(opts, flags))
			return libbpf_err(-EINVAL);
		break;
	}
proceed:
	fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, attr_sz);
	if (fd >= 0)
		return fd;
	/* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry
	 * and other similar programs
	 */
	err = -errno;
	if (err != -EINVAL)
		return libbpf_err(err);

	/* if the user used features not supported by the
	 * BPF_RAW_TRACEPOINT_OPEN command, then just give up immediately
	 */
	if (attr.link_create.target_fd || attr.link_create.target_btf_id)
		return libbpf_err(err);
	if (!OPTS_ZEROED(opts, sz))
		return libbpf_err(err);

	/* otherwise, for a few select kinds of programs that can be
	 * attached using the BPF_RAW_TRACEPOINT_OPEN command, try that as
	 * a fallback for older kernels
	 */
	switch (attach_type) {
	case BPF_TRACE_RAW_TP:
	case BPF_LSM_MAC:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
		return bpf_raw_tracepoint_open(NULL, prog_fd);
	default:
		return libbpf_err(err);
	}
}
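
/* Illustrative usage (hypothetical caller, not part of this file): attaching
 * an already-loaded fentry program via a BPF link, relying on the
 * BPF_RAW_TRACEPOINT_OPEN fallback above on kernels that predate LINK_CREATE
 * support for tracing programs:
 *
 *	int link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, NULL);
 *
 *	if (link_fd < 0)
 *		return link_fd;
 */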

int bpf_link_detach(int link_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_detach);
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.link_detach.link_fd = link_fd;

	ret = sys_bpf(BPF_LINK_DETACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_link_update(int link_fd, int new_prog_fd,
		    const struct bpf_link_update_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_update);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_link_update_opts))
		return libbpf_err(-EINVAL);

	if (OPTS_GET(opts, old_prog_fd, 0) && OPTS_GET(opts, old_map_fd, 0))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_prog_fd = new_prog_fd;
	attr.link_update.flags = OPTS_GET(opts, flags, 0);
	if (OPTS_GET(opts, old_prog_fd, 0))
		attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
	else if (OPTS_GET(opts, old_map_fd, 0))
		attr.link_update.old_map_fd = OPTS_GET(opts, old_map_fd, 0);

	ret = sys_bpf(BPF_LINK_UPDATE, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_iter_create(int link_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, iter_create);
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, attr_sz);
	attr.iter_create.link_fd = link_fd;

	fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_prog_query_opts(int target, enum bpf_attach_type type,
			struct bpf_prog_query_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, query);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_query_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.query.target_fd		= target;
	attr.query.attach_type		= type;
	attr.query.query_flags		= OPTS_GET(opts, query_flags, 0);
	attr.query.count		= OPTS_GET(opts, count, 0);
	attr.query.prog_ids		= ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
	attr.query.link_ids		= ptr_to_u64(OPTS_GET(opts, link_ids, NULL));
	attr.query.prog_attach_flags	= ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));
	attr.query.link_attach_flags	= ptr_to_u64(OPTS_GET(opts, link_attach_flags, NULL));

	ret = sys_bpf(BPF_PROG_QUERY, &attr, attr_sz);

	OPTS_SET(opts, attach_flags, attr.query.attach_flags);
	OPTS_SET(opts, revision, attr.query.revision);
	OPTS_SET(opts, count, attr.query.count);

	return libbpf_err_errno(ret);
}

int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	LIBBPF_OPTS(bpf_prog_query_opts, opts);
	int ret;

	opts.query_flags = query_flags;
	opts.prog_ids = prog_ids;
	opts.prog_cnt = *prog_cnt;

	ret = bpf_prog_query_opts(target_fd, type, &opts);

	if (attach_flags)
		*attach_flags = opts.attach_flags;
	*prog_cnt = opts.prog_cnt;

	return libbpf_err_errno(ret);
}

int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, test);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_test_run_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.test.prog_fd = prog_fd;
	attr.test.batch_size = OPTS_GET(opts, batch_size, 0);
	attr.test.cpu = OPTS_GET(opts, cpu, 0);
	attr.test.flags = OPTS_GET(opts, flags, 0);
	attr.test.repeat = OPTS_GET(opts, repeat, 0);
	attr.test.duration = OPTS_GET(opts, duration, 0);
	attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0);
	attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0);
	attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0);
	attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0);
	attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL));
	attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL));
	attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
	attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, attr_sz);

	OPTS_SET(opts, data_size_out, attr.test.data_size_out);
	OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
	OPTS_SET(opts, duration, attr.test.duration);
	OPTS_SET(opts, retval, attr.test.retval);

	return libbpf_err_errno(ret);
}

static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int err;

	memset(&attr, 0, attr_sz);
	attr.start_id = start_id;

	err = sys_bpf(cmd, &attr, attr_sz);
	if (!err)
		*next_id = attr.next_id;

	return libbpf_err_errno(err);
}

int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
}

int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
}

int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}

int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
}
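
/* Illustrative ID iteration (hypothetical caller, not part of this file):
 * starting from id 0 and walking until -ENOENT is the usual way to
 * enumerate all loaded programs, pairing with the *_get_fd_by_id()
 * helpers below:
 *
 *	__u32 id = 0;
 *	int fd;
 *
 *	while (bpf_prog_get_next_id(id, &id) == 0) {
 *		fd = bpf_prog_get_fd_by_id(id);
 *		if (fd < 0)
 *			continue; // e.g. raced with program unload
 *		// inspect the program here, then release the fd
 *		close(fd);
 *	}
 */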

int bpf_prog_get_fd_by_id_opts(__u32 id,
			       const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.prog_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_prog_get_fd_by_id(__u32 id)
{
	return bpf_prog_get_fd_by_id_opts(id, NULL);
}

int bpf_map_get_fd_by_id_opts(__u32 id,
			      const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.map_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_map_get_fd_by_id(__u32 id)
{
	return bpf_map_get_fd_by_id_opts(id, NULL);
}

int bpf_btf_get_fd_by_id_opts(__u32 id,
			      const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.btf_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_btf_get_fd_by_id(__u32 id)
{
	return bpf_btf_get_fd_by_id_opts(id, NULL);
}

int bpf_link_get_fd_by_id_opts(__u32 id,
			       const struct bpf_get_fd_by_id_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.link_id = id;
	attr.open_flags = OPTS_GET(opts, open_flags, 0);

	fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_link_get_fd_by_id(__u32 id)
{
	return bpf_link_get_fd_by_id_opts(id, NULL);
}

int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
	const size_t attr_sz = offsetofend(union bpf_attr, info);
	union bpf_attr attr;
	int err;

	memset(&attr, 0, attr_sz);
	attr.info.bpf_fd = bpf_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, attr_sz);
	if (!err)
		*info_len = attr.info.info_len;
	return libbpf_err_errno(err);
}

int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len)
{
	return bpf_obj_get_info_by_fd(prog_fd, info, info_len);
}

int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len)
{
	return bpf_obj_get_info_by_fd(map_fd, info, info_len);
}

int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len)
{
	return bpf_obj_get_info_by_fd(btf_fd, info, info_len);
}

int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len)
{
	return bpf_obj_get_info_by_fd(link_fd, info, info_len);
}
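
/* Illustrative usage (hypothetical caller, not part of this file): fetching
 * program info, where info_len is in/out and the kernel reports how much it
 * actually wrote:
 *
 *	struct bpf_prog_info info;
 *	__u32 info_len = sizeof(info);
 *	int err;
 *
 *	memset(&info, 0, sizeof(info));
 *	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
 *	if (err)
 *		return err;
 *	// info.id, info.name, etc. are now valid up to info_len bytes
 */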

int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint);
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, attr_sz);
	attr.raw_tracepoint.name = ptr_to_u64(name);
	attr.raw_tracepoint.prog_fd = prog_fd;

	fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, btf_token_fd);
	union bpf_attr attr;
	char *log_buf;
	size_t log_size;
	__u32 log_level;
	int fd;

	bump_rlimit_memlock();

	memset(&attr, 0, attr_sz);

	if (!OPTS_VALID(opts, bpf_btf_load_opts))
		return libbpf_err(-EINVAL);

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	if (log_size > UINT_MAX)
		return libbpf_err(-EINVAL);
	if (log_size && !log_buf)
		return libbpf_err(-EINVAL);

	attr.btf = ptr_to_u64(btf_data);
	attr.btf_size = btf_size;
	attr.btf_token_fd = OPTS_GET(opts, token_fd, 0);

	/* log_level == 0 and log_buf != NULL means "try loading without
	 * log_buf, but retry with log_buf and log_level=1 on error", which is
	 * consistent across low-level and high-level BTF and program loading
	 * APIs within libbpf and provides a sensible behavior in practice
	 */
	if (log_level) {
		attr.btf_log_buf = ptr_to_u64(log_buf);
		attr.btf_log_size = (__u32)log_size;
		attr.btf_log_level = log_level;
	}

	fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
	if (fd < 0 && log_buf && log_level == 0) {
		attr.btf_log_buf = ptr_to_u64(log_buf);
		attr.btf_log_size = (__u32)log_size;
		attr.btf_log_level = 1;
		fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
	}

	OPTS_SET(opts, log_true_size, attr.btf_log_true_size);
	return libbpf_err_errno(fd);
}

int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
		      __u64 *probe_addr)
{
	const size_t attr_sz = offsetofend(union bpf_attr, task_fd_query);
	union bpf_attr attr;
	int err;

	memset(&attr, 0, attr_sz);
	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = fd;
	attr.task_fd_query.flags = flags;
	attr.task_fd_query.buf = ptr_to_u64(buf);
	attr.task_fd_query.buf_len = *buf_len;

	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, attr_sz);

	*buf_len = attr.task_fd_query.buf_len;
	*prog_id = attr.task_fd_query.prog_id;
	*fd_type = attr.task_fd_query.fd_type;
	*probe_offset = attr.task_fd_query.probe_offset;
	*probe_addr = attr.task_fd_query.probe_addr;

	return libbpf_err_errno(err);
}

int bpf_enable_stats(enum bpf_stats_type type)
{
	const size_t attr_sz = offsetofend(union bpf_attr, enable_stats);
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, attr_sz);
	attr.enable_stats.type = type;

	fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, attr_sz);
	return libbpf_err_errno(fd);
}

int bpf_prog_bind_map(int prog_fd, int map_fd,
		      const struct bpf_prog_bind_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_bind_map);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_bind_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.prog_bind_map.prog_fd = prog_fd;
	attr.prog_bind_map.map_fd = map_fd;
	attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz);
	return libbpf_err_errno(ret);
}

int bpf_token_create(int bpffs_fd, struct bpf_token_create_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, token_create);
	union bpf_attr attr;
	int fd;

	if (!OPTS_VALID(opts, bpf_token_create_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.token_create.bpffs_fd = bpffs_fd;
	attr.token_create.flags = OPTS_GET(opts, flags, 0);

	fd = sys_bpf_fd(BPF_TOKEN_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
}