xref: /linux/tools/lib/bpf/bpf.c (revision ed4bc1890b4984d0af447ad3cc1f93541623f8f3)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 
3 /*
4  * common eBPF ELF operations.
5  *
6  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8  * Copyright (C) 2015 Huawei Inc.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation;
13  * version 2.1 of the License (not later!)
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with this program; if not,  see <http://www.gnu.org/licenses>
22  */
23 
24 #include <stdlib.h>
25 #include <string.h>
26 #include <memory.h>
27 #include <unistd.h>
28 #include <asm/unistd.h>
29 #include <errno.h>
30 #include <linux/bpf.h>
31 #include "bpf.h"
32 #include "libbpf.h"
33 #include "libbpf_internal.h"
34 
35 /* make sure libbpf doesn't use kernel-only integer typedefs */
36 #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
37 
38 /*
39  * When building perf, unistd.h is overridden. __NR_bpf is
40  * required to be defined explicitly.
41  */
42 #ifndef __NR_bpf
43 # if defined(__i386__)
44 #  define __NR_bpf 357
45 # elif defined(__x86_64__)
46 #  define __NR_bpf 321
47 # elif defined(__aarch64__)
48 #  define __NR_bpf 280
49 # elif defined(__sparc__)
50 #  define __NR_bpf 349
51 # elif defined(__s390__)
52 #  define __NR_bpf 351
53 # elif defined(__arc__)
54 #  define __NR_bpf 280
55 # else
56 #  error __NR_bpf not defined. libbpf does not support your arch.
57 # endif
58 #endif
59 
60 static inline __u64 ptr_to_u64(const void *ptr)
61 {
62 	return (__u64) (unsigned long) ptr;
63 }
64 
65 static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
66 			  unsigned int size)
67 {
68 	return syscall(__NR_bpf, cmd, attr, size);
69 }
70 
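/* BPF_PROG_LOAD can fail transiently with EAGAIN, so keep retrying until
 * the kernel gives a definitive answer.
 */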
71 static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
72 {
73 	int fd;
74 
75 	do {
76 		fd = sys_bpf(BPF_PROG_LOAD, attr, size);
77 	} while (fd < 0 && errno == EAGAIN);
78 
79 	return fd;
80 }
81 
82 int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
83 {
84 	union bpf_attr attr;
85 
86 	memset(&attr, '\0', sizeof(attr));
87 
88 	attr.map_type = create_attr->map_type;
89 	attr.key_size = create_attr->key_size;
90 	attr.value_size = create_attr->value_size;
91 	attr.max_entries = create_attr->max_entries;
92 	attr.map_flags = create_attr->map_flags;
93 	if (create_attr->name)
94 		memcpy(attr.map_name, create_attr->name,
95 		       min(strlen(create_attr->name), BPF_OBJ_NAME_LEN - 1));
96 	attr.numa_node = create_attr->numa_node;
97 	attr.btf_fd = create_attr->btf_fd;
98 	attr.btf_key_type_id = create_attr->btf_key_type_id;
99 	attr.btf_value_type_id = create_attr->btf_value_type_id;
100 	attr.map_ifindex = create_attr->map_ifindex;
101 	if (attr.map_type == BPF_MAP_TYPE_STRUCT_OPS)
102 		attr.btf_vmlinux_value_type_id =
103 			create_attr->btf_vmlinux_value_type_id;
104 	else
105 		attr.inner_map_fd = create_attr->inner_map_fd;
106 
107 	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
108 }
109 
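/*
 * Illustrative sketch (not part of the original source): creating a small
 * hash map through the attr-based API. All field values below are example
 * choices only.
 *
 *	struct bpf_create_map_attr map_attr = {
 *		.name = "example_map",
 *		.map_type = BPF_MAP_TYPE_HASH,
 *		.key_size = sizeof(__u32),
 *		.value_size = sizeof(__u64),
 *		.max_entries = 1024,
 *	};
 *	int map_fd = bpf_create_map_xattr(&map_attr);
 *
 * On failure the return value is negative and errno carries the reason.
 */
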
110 int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
111 			int key_size, int value_size, int max_entries,
112 			__u32 map_flags, int node)
113 {
114 	struct bpf_create_map_attr map_attr = {};
115 
116 	map_attr.name = name;
117 	map_attr.map_type = map_type;
118 	map_attr.map_flags = map_flags;
119 	map_attr.key_size = key_size;
120 	map_attr.value_size = value_size;
121 	map_attr.max_entries = max_entries;
122 	if (node >= 0) {
123 		map_attr.numa_node = node;
124 		map_attr.map_flags |= BPF_F_NUMA_NODE;
125 	}
126 
127 	return bpf_create_map_xattr(&map_attr);
128 }
129 
130 int bpf_create_map(enum bpf_map_type map_type, int key_size,
131 		   int value_size, int max_entries, __u32 map_flags)
132 {
133 	struct bpf_create_map_attr map_attr = {};
134 
135 	map_attr.map_type = map_type;
136 	map_attr.map_flags = map_flags;
137 	map_attr.key_size = key_size;
138 	map_attr.value_size = value_size;
139 	map_attr.max_entries = max_entries;
140 
141 	return bpf_create_map_xattr(&map_attr);
142 }
143 
144 int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
145 			int key_size, int value_size, int max_entries,
146 			__u32 map_flags)
147 {
148 	struct bpf_create_map_attr map_attr = {};
149 
150 	map_attr.name = name;
151 	map_attr.map_type = map_type;
152 	map_attr.map_flags = map_flags;
153 	map_attr.key_size = key_size;
154 	map_attr.value_size = value_size;
155 	map_attr.max_entries = max_entries;
156 
157 	return bpf_create_map_xattr(&map_attr);
158 }
159 
160 int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
161 			       int key_size, int inner_map_fd, int max_entries,
162 			       __u32 map_flags, int node)
163 {
164 	union bpf_attr attr;
165 
166 	memset(&attr, '\0', sizeof(attr));
167 
168 	attr.map_type = map_type;
169 	attr.key_size = key_size;
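	/* values of a map-in-map are 32-bit inner map references, hence the
	 * fixed 4-byte value size
	 */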
170 	attr.value_size = 4;
171 	attr.inner_map_fd = inner_map_fd;
172 	attr.max_entries = max_entries;
173 	attr.map_flags = map_flags;
174 	if (name)
175 		memcpy(attr.map_name, name,
176 		       min(strlen(name), BPF_OBJ_NAME_LEN - 1));
177 
178 	if (node >= 0) {
179 		attr.map_flags |= BPF_F_NUMA_NODE;
180 		attr.numa_node = node;
181 	}
182 
183 	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
184 }
185 
186 int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
187 			  int key_size, int inner_map_fd, int max_entries,
188 			  __u32 map_flags)
189 {
190 	return bpf_create_map_in_map_node(map_type, name, key_size,
191 					  inner_map_fd, max_entries, map_flags,
192 					  -1);
193 }
194 
195 static void *
196 alloc_zero_tailing_info(const void *orecord, __u32 cnt,
197 			__u32 actual_rec_size, __u32 expected_rec_size)
198 {
199 	__u64 info_len = (__u64)actual_rec_size * cnt;
200 	void *info, *nrecord;
201 	int i;
202 
203 	info = malloc(info_len);
204 	if (!info)
205 		return NULL;
206 
207 	/* zero out bytes kernel does not understand */
208 	nrecord = info;
209 	for (i = 0; i < cnt; i++) {
210 		memcpy(nrecord, orecord, expected_rec_size);
211 		memset(nrecord + expected_rec_size, 0,
212 		       actual_rec_size - expected_rec_size);
213 		orecord += actual_rec_size;
214 		nrecord += actual_rec_size;
215 	}
216 
217 	return info;
218 }
219 
220 int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
221 			   char *log_buf, size_t log_buf_sz)
222 {
223 	void *finfo = NULL, *linfo = NULL;
224 	union bpf_attr attr;
225 	__u32 log_level;
226 	int fd;
227 
228 	if (!load_attr || !log_buf != !log_buf_sz)
229 		return -EINVAL;
230 
231 	log_level = load_attr->log_level;
232 	if (log_level > (4 | 2 | 1) || (log_level && !log_buf))
233 		return -EINVAL;
234 
235 	memset(&attr, 0, sizeof(attr));
236 	attr.prog_type = load_attr->prog_type;
237 	attr.expected_attach_type = load_attr->expected_attach_type;
238 	if (attr.prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
239 	    attr.prog_type == BPF_PROG_TYPE_LSM) {
240 		attr.attach_btf_id = load_attr->attach_btf_id;
241 	} else if (attr.prog_type == BPF_PROG_TYPE_TRACING ||
242 		   attr.prog_type == BPF_PROG_TYPE_EXT) {
243 		attr.attach_btf_id = load_attr->attach_btf_id;
244 		attr.attach_prog_fd = load_attr->attach_prog_fd;
245 	} else {
246 		attr.prog_ifindex = load_attr->prog_ifindex;
247 		attr.kern_version = load_attr->kern_version;
248 	}
249 	attr.insn_cnt = (__u32)load_attr->insns_cnt;
250 	attr.insns = ptr_to_u64(load_attr->insns);
251 	attr.license = ptr_to_u64(load_attr->license);
252 
253 	attr.log_level = log_level;
254 	if (log_level) {
255 		attr.log_buf = ptr_to_u64(log_buf);
256 		attr.log_size = log_buf_sz;
257 	} else {
258 		attr.log_buf = ptr_to_u64(NULL);
259 		attr.log_size = 0;
260 	}
261 
262 	attr.prog_btf_fd = load_attr->prog_btf_fd;
263 	attr.func_info_rec_size = load_attr->func_info_rec_size;
264 	attr.func_info_cnt = load_attr->func_info_cnt;
265 	attr.func_info = ptr_to_u64(load_attr->func_info);
266 	attr.line_info_rec_size = load_attr->line_info_rec_size;
267 	attr.line_info_cnt = load_attr->line_info_cnt;
268 	attr.line_info = ptr_to_u64(load_attr->line_info);
269 	if (load_attr->name)
270 		memcpy(attr.prog_name, load_attr->name,
271 		       min(strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
272 	attr.prog_flags = load_attr->prog_flags;
273 
274 	fd = sys_bpf_prog_load(&attr, sizeof(attr));
275 	if (fd >= 0)
276 		return fd;
277 
278 	/* After bpf_prog_load, the kernel may modify certain attributes
279 	 * to give user space a hint how to deal with loading failure.
280 	 * Check to see whether we can make some changes and load again.
281 	 */
282 	while (errno == E2BIG && (!finfo || !linfo)) {
283 		if (!finfo && attr.func_info_cnt &&
284 		    attr.func_info_rec_size < load_attr->func_info_rec_size) {
285 			/* try with corrected func info records */
286 			finfo = alloc_zero_tailing_info(load_attr->func_info,
287 							load_attr->func_info_cnt,
288 							load_attr->func_info_rec_size,
289 							attr.func_info_rec_size);
290 			if (!finfo)
291 				goto done;
292 
293 			attr.func_info = ptr_to_u64(finfo);
294 			attr.func_info_rec_size = load_attr->func_info_rec_size;
295 		} else if (!linfo && attr.line_info_cnt &&
296 			   attr.line_info_rec_size <
297 			   load_attr->line_info_rec_size) {
298 			linfo = alloc_zero_tailing_info(load_attr->line_info,
299 							load_attr->line_info_cnt,
300 							load_attr->line_info_rec_size,
301 							attr.line_info_rec_size);
302 			if (!linfo)
303 				goto done;
304 
305 			attr.line_info = ptr_to_u64(linfo);
306 			attr.line_info_rec_size = load_attr->line_info_rec_size;
307 		} else {
308 			break;
309 		}
310 
311 		fd = sys_bpf_prog_load(&attr, sizeof(attr));
312 
313 		if (fd >= 0)
314 			goto done;
315 	}
316 
317 	if (log_level || !log_buf)
318 		goto done;
319 
320 	/* Try again with log */
321 	attr.log_buf = ptr_to_u64(log_buf);
322 	attr.log_size = log_buf_sz;
323 	attr.log_level = 1;
324 	log_buf[0] = 0;
325 	fd = sys_bpf_prog_load(&attr, sizeof(attr));
326 done:
327 	free(finfo);
328 	free(linfo);
329 	return fd;
330 }
331 
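/*
 * Illustrative sketch (not part of the original source): loading a trivial
 * "return 0" socket filter with a verifier log buffer. The two raw
 * instructions are "r0 = 0" and "exit".
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	struct bpf_load_program_attr load_attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns = insns,
 *		.insns_cnt = 2,
 *		.license = "GPL",
 *	};
 *	char log[4096];
 *	int prog_fd = bpf_load_program_xattr(&load_attr, log, sizeof(log));
 *
 * If loading fails and a log buffer was passed, log contains the verifier
 * output and errno describes the error.
 */
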
332 int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
333 		     size_t insns_cnt, const char *license,
334 		     __u32 kern_version, char *log_buf,
335 		     size_t log_buf_sz)
336 {
337 	struct bpf_load_program_attr load_attr;
338 
339 	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
340 	load_attr.prog_type = type;
341 	load_attr.expected_attach_type = 0;
342 	load_attr.name = NULL;
343 	load_attr.insns = insns;
344 	load_attr.insns_cnt = insns_cnt;
345 	load_attr.license = license;
346 	load_attr.kern_version = kern_version;
347 
348 	return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
349 }
350 
351 int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
352 		       size_t insns_cnt, __u32 prog_flags, const char *license,
353 		       __u32 kern_version, char *log_buf, size_t log_buf_sz,
354 		       int log_level)
355 {
356 	union bpf_attr attr;
357 
358 	memset(&attr, 0, sizeof(attr));
359 	attr.prog_type = type;
360 	attr.insn_cnt = (__u32)insns_cnt;
361 	attr.insns = ptr_to_u64(insns);
362 	attr.license = ptr_to_u64(license);
363 	attr.log_buf = ptr_to_u64(log_buf);
364 	attr.log_size = log_buf_sz;
365 	attr.log_level = log_level;
366 	log_buf[0] = 0;
367 	attr.kern_version = kern_version;
368 	attr.prog_flags = prog_flags;
369 
370 	return sys_bpf_prog_load(&attr, sizeof(attr));
371 }
372 
373 int bpf_map_update_elem(int fd, const void *key, const void *value,
374 			__u64 flags)
375 {
376 	union bpf_attr attr;
377 
378 	memset(&attr, 0, sizeof(attr));
379 	attr.map_fd = fd;
380 	attr.key = ptr_to_u64(key);
381 	attr.value = ptr_to_u64(value);
382 	attr.flags = flags;
383 
384 	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
385 }
386 
387 int bpf_map_lookup_elem(int fd, const void *key, void *value)
388 {
389 	union bpf_attr attr;
390 
391 	memset(&attr, 0, sizeof(attr));
392 	attr.map_fd = fd;
393 	attr.key = ptr_to_u64(key);
394 	attr.value = ptr_to_u64(value);
395 
396 	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
397 }
398 
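/*
 * Illustrative sketch (not part of the original source): round-tripping a
 * value through a map whose keys are __u32 and values are __u64. map_fd is
 * a placeholder for an fd obtained from one of the map creation helpers.
 *
 *	__u32 key = 1;
 *	__u64 value = 42, out;
 *
 *	if (bpf_map_update_elem(map_fd, &key, &value, BPF_ANY))
 *		... update failed, errno is set ...
 *	if (!bpf_map_lookup_elem(map_fd, &key, &out))
 *		... out now holds 42 ...
 */
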
399 int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
400 {
401 	union bpf_attr attr;
402 
403 	memset(&attr, 0, sizeof(attr));
404 	attr.map_fd = fd;
405 	attr.key = ptr_to_u64(key);
406 	attr.value = ptr_to_u64(value);
407 	attr.flags = flags;
408 
409 	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
410 }
411 
412 int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
413 {
414 	union bpf_attr attr;
415 
416 	memset(&attr, 0, sizeof(attr));
417 	attr.map_fd = fd;
418 	attr.key = ptr_to_u64(key);
419 	attr.value = ptr_to_u64(value);
420 
421 	return sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
422 }
423 
424 int bpf_map_delete_elem(int fd, const void *key)
425 {
426 	union bpf_attr attr;
427 
428 	memset(&attr, 0, sizeof(attr));
429 	attr.map_fd = fd;
430 	attr.key = ptr_to_u64(key);
431 
432 	return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
433 }
434 
435 int bpf_map_get_next_key(int fd, const void *key, void *next_key)
436 {
437 	union bpf_attr attr;
438 
439 	memset(&attr, 0, sizeof(attr));
440 	attr.map_fd = fd;
441 	attr.key = ptr_to_u64(key);
442 	attr.next_key = ptr_to_u64(next_key);
443 
444 	return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
445 }
446 
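/*
 * Illustrative sketch (not part of the original source): walking all keys of
 * a map. On reasonably recent kernels, passing a NULL key asks for the first
 * key; the loop ends when the call fails with errno == ENOENT.
 *
 *	__u32 cur, next;
 *	void *prev = NULL;
 *
 *	while (!bpf_map_get_next_key(map_fd, prev, &next)) {
 *		... process next ...
 *		cur = next;
 *		prev = &cur;
 *	}
 */
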
447 int bpf_map_freeze(int fd)
448 {
449 	union bpf_attr attr;
450 
451 	memset(&attr, 0, sizeof(attr));
452 	attr.map_fd = fd;
453 
454 	return sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
455 }
456 
457 static int bpf_map_batch_common(int cmd, int fd, void  *in_batch,
458 				void *out_batch, void *keys, void *values,
459 				__u32 *count,
460 				const struct bpf_map_batch_opts *opts)
461 {
462 	union bpf_attr attr;
463 	int ret;
464 
465 	if (!OPTS_VALID(opts, bpf_map_batch_opts))
466 		return -EINVAL;
467 
468 	memset(&attr, 0, sizeof(attr));
469 	attr.batch.map_fd = fd;
470 	attr.batch.in_batch = ptr_to_u64(in_batch);
471 	attr.batch.out_batch = ptr_to_u64(out_batch);
472 	attr.batch.keys = ptr_to_u64(keys);
473 	attr.batch.values = ptr_to_u64(values);
474 	attr.batch.count = *count;
475 	attr.batch.elem_flags  = OPTS_GET(opts, elem_flags, 0);
476 	attr.batch.flags = OPTS_GET(opts, flags, 0);
477 
478 	ret = sys_bpf(cmd, &attr, sizeof(attr));
479 	*count = attr.batch.count;
480 
481 	return ret;
482 }
483 
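/*
 * The batch wrappers below share one calling convention: *count holds the
 * number of elements the caller wants processed and is updated with the
 * number the kernel actually handled, even when the call itself fails.
 * Passing NULL as in_batch starts from the beginning; the returned out_batch
 * is handed back as in_batch on the next call. A minimal lookup sketch
 * (placeholder map_fd, opaque cursor sized for a hash map; not part of the
 * original source):
 *
 *	__u32 n = 64, batch;
 *	__u32 keys[64];
 *	__u64 vals[64];
 *	DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts);
 *
 *	err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, vals, &n, &opts);
 */
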
484 int bpf_map_delete_batch(int fd, void *keys, __u32 *count,
485 			 const struct bpf_map_batch_opts *opts)
486 {
487 	return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
488 				    NULL, keys, NULL, count, opts);
489 }
490 
491 int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
492 			 void *values, __u32 *count,
493 			 const struct bpf_map_batch_opts *opts)
494 {
495 	return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
496 				    out_batch, keys, values, count, opts);
497 }
498 
499 int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
500 				    void *keys, void *values, __u32 *count,
501 				    const struct bpf_map_batch_opts *opts)
502 {
503 	return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
504 				    fd, in_batch, out_batch, keys, values,
505 				    count, opts);
506 }
507 
508 int bpf_map_update_batch(int fd, void *keys, void *values, __u32 *count,
509 			 const struct bpf_map_batch_opts *opts)
510 {
511 	return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
512 				    keys, values, count, opts);
513 }
514 
515 int bpf_obj_pin(int fd, const char *pathname)
516 {
517 	union bpf_attr attr;
518 
519 	memset(&attr, 0, sizeof(attr));
520 	attr.pathname = ptr_to_u64((void *)pathname);
521 	attr.bpf_fd = fd;
522 
523 	return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
524 }
525 
526 int bpf_obj_get(const char *pathname)
527 {
528 	union bpf_attr attr;
529 
530 	memset(&attr, 0, sizeof(attr));
531 	attr.pathname = ptr_to_u64((void *)pathname);
532 
533 	return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
534 }
535 
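/*
 * Illustrative sketch (not part of the original source): pinning an object
 * under a bpffs mount (conventionally /sys/fs/bpf) and re-opening it later.
 * The path is an example.
 *
 *	if (bpf_obj_pin(map_fd, "/sys/fs/bpf/example_map"))
 *		... pin failed, errno is set ...
 *	int pinned_fd = bpf_obj_get("/sys/fs/bpf/example_map");
 */
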
536 int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
537 		    unsigned int flags)
538 {
539 	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
540 		.flags = flags,
541 	);
542 
543 	return bpf_prog_attach_xattr(prog_fd, target_fd, type, &opts);
544 }
545 
546 int bpf_prog_attach_xattr(int prog_fd, int target_fd,
547 			  enum bpf_attach_type type,
548 			  const struct bpf_prog_attach_opts *opts)
549 {
550 	union bpf_attr attr;
551 
552 	if (!OPTS_VALID(opts, bpf_prog_attach_opts))
553 		return -EINVAL;
554 
555 	memset(&attr, 0, sizeof(attr));
556 	attr.target_fd	   = target_fd;
557 	attr.attach_bpf_fd = prog_fd;
558 	attr.attach_type   = type;
559 	attr.attach_flags  = OPTS_GET(opts, flags, 0);
560 	attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);
561 
562 	return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
563 }
564 
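/*
 * Illustrative sketch (not part of the original source): attaching a loaded
 * cgroup skb program to a cgroup. The cgroup path is just an example.
 *
 *	int cg_fd = open("/sys/fs/cgroup/unified/example", O_RDONLY);
 *	int err = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_INGRESS,
 *				  BPF_F_ALLOW_MULTI);
 */
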
565 int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
566 {
567 	union bpf_attr attr;
568 
569 	memset(&attr, 0, sizeof(attr));
570 	attr.target_fd	 = target_fd;
571 	attr.attach_type = type;
572 
573 	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
574 }
575 
576 int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
577 {
578 	union bpf_attr attr;
579 
580 	memset(&attr, 0, sizeof(attr));
581 	attr.target_fd	 = target_fd;
582 	attr.attach_bpf_fd = prog_fd;
583 	attr.attach_type = type;
584 
585 	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
586 }
587 
588 int bpf_link_create(int prog_fd, int target_fd,
589 		    enum bpf_attach_type attach_type,
590 		    const struct bpf_link_create_opts *opts)
591 {
592 	union bpf_attr attr;
593 
594 	if (!OPTS_VALID(opts, bpf_link_create_opts))
595 		return -EINVAL;
596 
597 	memset(&attr, 0, sizeof(attr));
598 	attr.link_create.prog_fd = prog_fd;
599 	attr.link_create.target_fd = target_fd;
600 	attr.link_create.attach_type = attach_type;
601 	attr.link_create.flags = OPTS_GET(opts, flags, 0);
602 	attr.link_create.iter_info =
603 		ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
604 	attr.link_create.iter_info_len = OPTS_GET(opts, iter_info_len, 0);
605 
606 	return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
607 }
608 
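/*
 * Illustrative sketch (not part of the original source): creating a BPF link
 * for a cgroup program. Closing the returned link fd detaches the program
 * again unless the link has been pinned.
 *
 *	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
 *	int link_fd = bpf_link_create(prog_fd, cgroup_fd,
 *				      BPF_CGROUP_INET_INGRESS, &opts);
 */
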
609 int bpf_link_detach(int link_fd)
610 {
611 	union bpf_attr attr;
612 
613 	memset(&attr, 0, sizeof(attr));
614 	attr.link_detach.link_fd = link_fd;
615 
616 	return sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr));
617 }
618 
619 int bpf_link_update(int link_fd, int new_prog_fd,
620 		    const struct bpf_link_update_opts *opts)
621 {
622 	union bpf_attr attr;
623 
624 	if (!OPTS_VALID(opts, bpf_link_update_opts))
625 		return -EINVAL;
626 
627 	memset(&attr, 0, sizeof(attr));
628 	attr.link_update.link_fd = link_fd;
629 	attr.link_update.new_prog_fd = new_prog_fd;
630 	attr.link_update.flags = OPTS_GET(opts, flags, 0);
631 	attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
632 
633 	return sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
634 }
635 
636 int bpf_iter_create(int link_fd)
637 {
638 	union bpf_attr attr;
639 
640 	memset(&attr, 0, sizeof(attr));
641 	attr.iter_create.link_fd = link_fd;
642 
643 	return sys_bpf(BPF_ITER_CREATE, &attr, sizeof(attr));
644 }
645 
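/* prog_cnt is in/out: it must hold the capacity of the prog_ids array on
 * entry and is updated with the count the kernel reported back.
 */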
646 int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
647 		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
648 {
649 	union bpf_attr attr;
650 	int ret;
651 
652 	memset(&attr, 0, sizeof(attr));
653 	attr.query.target_fd	= target_fd;
654 	attr.query.attach_type	= type;
655 	attr.query.query_flags	= query_flags;
656 	attr.query.prog_cnt	= *prog_cnt;
657 	attr.query.prog_ids	= ptr_to_u64(prog_ids);
658 
659 	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
660 	if (attach_flags)
661 		*attach_flags = attr.query.attach_flags;
662 	*prog_cnt = attr.query.prog_cnt;
663 	return ret;
664 }
665 
666 int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
667 		      void *data_out, __u32 *size_out, __u32 *retval,
668 		      __u32 *duration)
669 {
670 	union bpf_attr attr;
671 	int ret;
672 
673 	memset(&attr, 0, sizeof(attr));
674 	attr.test.prog_fd = prog_fd;
675 	attr.test.data_in = ptr_to_u64(data);
676 	attr.test.data_out = ptr_to_u64(data_out);
677 	attr.test.data_size_in = size;
678 	attr.test.repeat = repeat;
679 
680 	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
681 	if (size_out)
682 		*size_out = attr.test.data_size_out;
683 	if (retval)
684 		*retval = attr.test.retval;
685 	if (duration)
686 		*duration = attr.test.duration;
687 	return ret;
688 }
689 
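/*
 * Illustrative sketch (not part of the original source): a single test run of
 * a networking program against a dummy packet buffer. prog_fd is a
 * placeholder.
 *
 *	char pkt_in[64] = {};
 *	char pkt_out[256];
 *	__u32 out_size = sizeof(pkt_out), retval;
 *
 *	err = bpf_prog_test_run(prog_fd, 1, pkt_in, sizeof(pkt_in),
 *				pkt_out, &out_size, &retval, NULL);
 */
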
690 int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
691 {
692 	union bpf_attr attr;
693 	int ret;
694 
695 	if (!test_attr->data_out && test_attr->data_size_out > 0)
696 		return -EINVAL;
697 
698 	memset(&attr, 0, sizeof(attr));
699 	attr.test.prog_fd = test_attr->prog_fd;
700 	attr.test.data_in = ptr_to_u64(test_attr->data_in);
701 	attr.test.data_out = ptr_to_u64(test_attr->data_out);
702 	attr.test.data_size_in = test_attr->data_size_in;
703 	attr.test.data_size_out = test_attr->data_size_out;
704 	attr.test.ctx_in = ptr_to_u64(test_attr->ctx_in);
705 	attr.test.ctx_out = ptr_to_u64(test_attr->ctx_out);
706 	attr.test.ctx_size_in = test_attr->ctx_size_in;
707 	attr.test.ctx_size_out = test_attr->ctx_size_out;
708 	attr.test.repeat = test_attr->repeat;
709 
710 	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
711 	test_attr->data_size_out = attr.test.data_size_out;
712 	test_attr->ctx_size_out = attr.test.ctx_size_out;
713 	test_attr->retval = attr.test.retval;
714 	test_attr->duration = attr.test.duration;
715 	return ret;
716 }
717 
718 static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
719 {
720 	union bpf_attr attr;
721 	int err;
722 
723 	memset(&attr, 0, sizeof(attr));
724 	attr.start_id = start_id;
725 
726 	err = sys_bpf(cmd, &attr, sizeof(attr));
727 	if (!err)
728 		*next_id = attr.next_id;
729 
730 	return err;
731 }
732 
733 int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
734 {
735 	return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
736 }
737 
738 int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
739 {
740 	return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
741 }
742 
743 int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
744 {
745 	return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
746 }
747 
748 int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
749 {
750 	return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
751 }
752 
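/*
 * Illustrative sketch (not part of the original source): iterating over all
 * loaded programs on the system (typically needs CAP_SYS_ADMIN). The loop
 * stops when the kernel reports ENOENT, i.e. no further IDs.
 *
 *	__u32 id = 0;
 *
 *	while (!bpf_prog_get_next_id(id, &id)) {
 *		int fd = bpf_prog_get_fd_by_id(id);
 *
 *		if (fd < 0)
 *			continue;	... object may have vanished ...
 *		... inspect the program, then close(fd) ...
 *	}
 */
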
753 int bpf_prog_get_fd_by_id(__u32 id)
754 {
755 	union bpf_attr attr;
756 
757 	memset(&attr, 0, sizeof(attr));
758 	attr.prog_id = id;
759 
760 	return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
761 }
762 
763 int bpf_map_get_fd_by_id(__u32 id)
764 {
765 	union bpf_attr attr;
766 
767 	memset(&attr, 0, sizeof(attr));
768 	attr.map_id = id;
769 
770 	return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
771 }
772 
773 int bpf_btf_get_fd_by_id(__u32 id)
774 {
775 	union bpf_attr attr;
776 
777 	memset(&attr, 0, sizeof(attr));
778 	attr.btf_id = id;
779 
780 	return sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
781 }
782 
783 int bpf_link_get_fd_by_id(__u32 id)
784 {
785 	union bpf_attr attr;
786 
787 	memset(&attr, 0, sizeof(attr));
788 	attr.link_id = id;
789 
790 	return sys_bpf(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
791 }
792 
793 int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
794 {
795 	union bpf_attr attr;
796 	int err;
797 
798 	memset(&attr, 0, sizeof(attr));
799 	attr.info.bpf_fd = bpf_fd;
800 	attr.info.info_len = *info_len;
801 	attr.info.info = ptr_to_u64(info);
802 
803 	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
804 	if (!err)
805 		*info_len = attr.info.info_len;
806 
807 	return err;
808 }
809 
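/*
 * Illustrative sketch (not part of the original source): querying program
 * metadata. info_len is in/out, so the kernel reports how much it filled in.
 *
 *	struct bpf_prog_info info = {};
 *	__u32 len = sizeof(info);
 *
 *	if (!bpf_obj_get_info_by_fd(prog_fd, &info, &len))
 *		... info.id, info.name, info.xlated_prog_len etc. are valid ...
 */
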
810 int bpf_raw_tracepoint_open(const char *name, int prog_fd)
811 {
812 	union bpf_attr attr;
813 
814 	memset(&attr, 0, sizeof(attr));
815 	attr.raw_tracepoint.name = ptr_to_u64(name);
816 	attr.raw_tracepoint.prog_fd = prog_fd;
817 
818 	return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
819 }
820 
821 int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
822 		 bool do_log)
823 {
824 	union bpf_attr attr = {};
825 	int fd;
826 
827 	attr.btf = ptr_to_u64(btf);
828 	attr.btf_size = btf_size;
829 
830 retry:
831 	if (do_log && log_buf && log_buf_size) {
832 		attr.btf_log_level = 1;
833 		attr.btf_log_size = log_buf_size;
834 		attr.btf_log_buf = ptr_to_u64(log_buf);
835 	}
836 
837 	fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
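	/* If the first attempt failed with logging disabled, retry once with
	 * the BTF verifier log enabled so the caller gets a reason for the
	 * failure.
	 */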
838 	if (fd == -1 && !do_log && log_buf && log_buf_size) {
839 		do_log = true;
840 		goto retry;
841 	}
842 
843 	return fd;
844 }
845 
846 int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
847 		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
848 		      __u64 *probe_addr)
849 {
850 	union bpf_attr attr = {};
851 	int err;
852 
853 	attr.task_fd_query.pid = pid;
854 	attr.task_fd_query.fd = fd;
855 	attr.task_fd_query.flags = flags;
856 	attr.task_fd_query.buf = ptr_to_u64(buf);
857 	attr.task_fd_query.buf_len = *buf_len;
858 
859 	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));
860 	*buf_len = attr.task_fd_query.buf_len;
861 	*prog_id = attr.task_fd_query.prog_id;
862 	*fd_type = attr.task_fd_query.fd_type;
863 	*probe_offset = attr.task_fd_query.probe_offset;
864 	*probe_addr = attr.task_fd_query.probe_addr;
865 
866 	return err;
867 }
868 
869 int bpf_enable_stats(enum bpf_stats_type type)
870 {
871 	union bpf_attr attr;
872 
873 	memset(&attr, 0, sizeof(attr));
874 	attr.enable_stats.type = type;
875 
876 	return sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));
877 }
878