/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not,  see <http://www.gnu.org/licenses>
 */

#include <stdlib.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <linux/bpf.h>
#include "bpf.h"

/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif

#define min(x, y) ((x) < (y) ? (x) : (y))

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}
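
/*
 * Editorial note: sys_bpf() is a thin wrapper around the bpf(2) syscall.
 * Like syscall(2), it returns a non-negative value on success (a new fd
 * for commands such as BPF_MAP_CREATE and BPF_PROG_LOAD, zero for most
 * others) and -1 with errno set on failure.
 */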

int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags, int node)
{
	__u32 name_len = name ? strlen(name) : 0;
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	memcpy(attr.map_name, name, min(name_len, BPF_OBJ_NAME_LEN - 1));

	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

int bpf_create_map(enum bpf_map_type map_type, int key_size,
		   int value_size, int max_entries, __u32 map_flags)
{
	return bpf_create_map_node(map_type, NULL, key_size, value_size,
				   max_entries, map_flags, -1);
}

int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
			int key_size, int value_size, int max_entries,
			__u32 map_flags)
{
	return bpf_create_map_node(map_type, name, key_size, value_size,
				   max_entries, map_flags, -1);
}
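
/*
 * Illustrative example (editorial sketch, not part of the original file):
 * creating a small BPF_MAP_TYPE_HASH map with 4-byte keys and 8-byte
 * values. On failure the helpers return -1 with errno set; the map name
 * is truncated to BPF_OBJ_NAME_LEN - 1 characters and requires a kernel
 * that understands attr.map_name.
 *
 *	int map_fd = bpf_create_map_name(BPF_MAP_TYPE_HASH, "example_map",
 *					 sizeof(__u32), sizeof(__u64),
 *					 64, 0);
 *	if (map_fd < 0)
 *		return -1;
 */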

int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
			       int key_size, int inner_map_fd, int max_entries,
			       __u32 map_flags, int node)
{
	__u32 name_len = name ? strlen(name) : 0;
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = 4;
	attr.inner_map_fd = inner_map_fd;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	memcpy(attr.map_name, name, min(name_len, BPF_OBJ_NAME_LEN - 1));

	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
			  int key_size, int inner_map_fd, int max_entries,
			  __u32 map_flags)
{
	return bpf_create_map_in_map_node(map_type, name, key_size,
					  inner_map_fd, max_entries, map_flags,
					  -1);
}
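
/*
 * Editorial note: for map-in-map types (BPF_MAP_TYPE_ARRAY_OF_MAPS,
 * BPF_MAP_TYPE_HASH_OF_MAPS) the value is always a 4-byte map reference,
 * which is why value_size is hard-coded to 4 above; inner_map_fd only
 * supplies the template map the kernel uses to type-check later inserts.
 * A sketch of populating one slot from user space, assuming outer_fd and
 * inner_fd are valid map fds:
 *
 *	__u32 idx = 0;
 *	bpf_map_update_elem(outer_fd, &idx, &inner_fd, BPF_ANY);
 */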

int bpf_load_program_name(enum bpf_prog_type type, const char *name,
			  const struct bpf_insn *insns,
			  size_t insns_cnt, const char *license,
			  __u32 kern_version, char *log_buf,
			  size_t log_buf_sz)
{
	int fd;
	union bpf_attr attr;
	__u32 name_len = name ? strlen(name) : 0;

	bzero(&attr, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(NULL);
	attr.log_size = 0;
	attr.log_level = 0;
	attr.kern_version = kern_version;
	memcpy(attr.prog_name, name, min(name_len, BPF_OBJ_NAME_LEN - 1));

	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
	if (fd >= 0 || !log_buf || !log_buf_sz)
		return fd;

	/* Try again with log */
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = 1;
	log_buf[0] = 0;
	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}

int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		     size_t insns_cnt, const char *license,
		     __u32 kern_version, char *log_buf,
		     size_t log_buf_sz)
{
	return bpf_load_program_name(type, NULL, insns, insns_cnt, license,
				     kern_version, log_buf, log_buf_sz);
}
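
/*
 * Illustrative example (editorial sketch, not part of the original file):
 * loading a minimal "return 0" socket filter, using the BPF_MOV64_IMM()
 * and BPF_EXIT_INSN() instruction-building macros from the kernel's
 * filter.h helpers. Note the retry logic above: the first BPF_PROG_LOAD
 * runs without a log buffer, and only a failed load is retried with
 * log_level 1 so the verifier log lands in log_buf.
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	char log[4096];
 *	int prog_fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
 *				       sizeof(prog) / sizeof(prog[0]), "GPL",
 *				       0, log, sizeof(log));
 */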

int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		       size_t insns_cnt, int strict_alignment,
		       const char *license, __u32 kern_version,
		       char *log_buf, size_t log_buf_sz, int log_level)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = log_level;
	log_buf[0] = 0;
	attr.kern_version = kern_version;
	attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;

	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}

int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}

int bpf_map_delete_elem(int fd, const void *key)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}

int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}
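
/*
 * Illustrative example (editorial sketch, not part of the original file):
 * the element helpers above return 0 on success and -1 with errno set
 * otherwise. A map with 4-byte keys and 8-byte values can be walked with
 * bpf_map_get_next_key(), starting from a key that is not in the map:
 *
 *	__u32 key = -1, next_key;
 *	__u64 value;
 *
 *	while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0) {
 *		if (bpf_map_lookup_elem(map_fd, &next_key, &value) == 0)
 *			printf("%u -> %llu\n", next_key, value);
 *		key = next_key;
 *	}
 */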

int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.bpf_fd = fd;

	return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}

int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);

	return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}
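
/*
 * Illustrative example (editorial sketch, not part of the original file):
 * pinning an object so it outlives the creating process, then reopening
 * it later. The pathname must live on a mounted bpf filesystem;
 * /sys/fs/bpf is the conventional mount point.
 *
 *	bpf_obj_pin(map_fd, "/sys/fs/bpf/example_map");
 *	...
 *	int fd = bpf_obj_get("/sys/fs/bpf/example_map");
 */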

int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.target_fd	   = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type   = type;
	attr.attach_flags  = flags;

	return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}

int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.target_fd	 = target_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}

int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.target_fd	 = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
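
/*
 * Illustrative example (editorial sketch, not part of the original file):
 * attaching a cgroup skb program, where target_fd is an open fd on a
 * cgroup v2 directory. bpf_prog_detach2() additionally names the program
 * to detach, which attach points created with BPF_F_ALLOW_MULTI need.
 *
 *	int cg_fd = open("/sys/fs/cgroup/example", O_RDONLY);
 *
 *	bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_INGRESS, 0);
 *	...
 *	bpf_prog_detach2(prog_fd, cg_fd, BPF_CGROUP_INET_INGRESS);
 */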

int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	union bpf_attr attr;
	int ret;

	bzero(&attr, sizeof(attr));
	attr.query.target_fd	= target_fd;
	attr.query.attach_type	= type;
	attr.query.query_flags	= query_flags;
	attr.query.prog_cnt	= *prog_cnt;
	attr.query.prog_ids	= ptr_to_u64(prog_ids);

	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
	if (attach_flags)
		*attach_flags = attr.query.attach_flags;
	*prog_cnt = attr.query.prog_cnt;
	return ret;
}
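
/*
 * Editorial note: bpf_prog_query() is an in/out interface. On entry
 * *prog_cnt holds the capacity of the prog_ids array; on return it holds
 * the number of programs attached at target_fd for the given attach type,
 * with up to the original capacity of their IDs written into prog_ids.
 */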

int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
		      void *data_out, __u32 *size_out, __u32 *retval,
		      __u32 *duration)
{
	union bpf_attr attr;
	int ret;

	bzero(&attr, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = ptr_to_u64(data);
	attr.test.data_out = ptr_to_u64(data_out);
	attr.test.data_size_in = size;
	attr.test.repeat = repeat;

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	if (size_out)
		*size_out = attr.test.data_size_out;
	if (retval)
		*retval = attr.test.retval;
	if (duration)
		*duration = attr.test.duration;
	return ret;
}
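
/*
 * Illustrative example (editorial sketch, not part of the original file):
 * running a loaded program over a test packet without attaching it.
 * retval receives the program's return code and duration the average run
 * time in nanoseconds over the requested repeat count.
 *
 *	char pkt_in[64] = { 0 };
 *	char pkt_out[256];
 *	__u32 out_size, retval, duration;
 *
 *	bpf_prog_test_run(prog_fd, 1, pkt_in, sizeof(pkt_in),
 *			  pkt_out, &out_size, &retval, &duration);
 */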

int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int err;

	bzero(&attr, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return err;
}

int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int err;

	bzero(&attr, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return err;
}

int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.prog_id = id;

	return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}

int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.map_id = id;

	return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
}

int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	bzero(&attr, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!err)
		*info_len = attr.info.info_len;

	return err;
}
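
/*
 * Illustrative example (editorial sketch, not part of the original file):
 * walking every BPF program loaded on the system and fetching its
 * bpf_prog_info. ID iteration stops when the *_GET_NEXT_ID call fails
 * with errno == ENOENT. Despite the prog_fd parameter name,
 * bpf_obj_get_info_by_fd() accepts map fds as well.
 *
 *	struct bpf_prog_info info;
 *	__u32 id = 0, len;
 *	int fd;
 *
 *	while (bpf_prog_get_next_id(id, &id) == 0) {
 *		fd = bpf_prog_get_fd_by_id(id);
 *		if (fd < 0)
 *			continue;
 *		memset(&info, 0, sizeof(info));
 *		len = sizeof(info);
 *		if (bpf_obj_get_info_by_fd(fd, &info, &len) == 0)
 *			printf("prog id %u, type %u\n", info.id, info.type);
 *		close(fd);
 *	}
 */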