xref: /linux/kernel/bpf/helpers.c (revision 827634added7f38b7d724cab1dccdb2b004c13c3)
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>

/* If a kernel subsystem allows eBPF programs to call this function, it
 * should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback so that the verifier can properly
 * check the arguments.
 *
 * The map implementations rely on RCU in their lookup/update/delete methods,
 * therefore eBPF programs must run under the RCU read lock whenever they are
 * allowed to access maps; hence the rcu_read_lock_held() check in all three
 * functions.
 */
static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* the verifier checked that R1 contains a valid pointer to a bpf_map
	 * and that R2 points to the program stack with map->key_size bytes
	 * initialized
	 */
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	void *key = (void *) (unsigned long) r2;
	void *value;

	WARN_ON_ONCE(!rcu_read_lock_held());

	value = map->ops->map_lookup_elem(map, key);

	/* lookup() returns either a pointer to the element's value or NULL,
	 * which is exactly what the PTR_TO_MAP_VALUE_OR_NULL return type
	 * expresses
	 */
	return (unsigned long) value;
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func = bpf_map_lookup_elem,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
};
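
/*
 * Illustrative sketch, not part of this file: how an eBPF program written in
 * restricted C (in the style of samples/bpf) would use the lookup helper.
 * The map name "my_map", the SEC() annotations and the helper declarations
 * come from the samples' bpf_helpers.h and are assumptions for this example.
 * Because the helper's return type is PTR_TO_MAP_VALUE_OR_NULL, the verifier
 * rejects any program that dereferences the result without a NULL check.
 *
 *	#include <uapi/linux/bpf.h>
 *	#include "bpf_helpers.h"
 *
 *	struct bpf_map_def SEC("maps") my_map = {
 *		.type        = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(u32),
 *		.value_size  = sizeof(long),
 *		.max_entries = 4,
 *	};
 *
 *	SEC("socket")
 *	int count_packets(struct __sk_buff *skb)
 *	{
 *		u32 key = 0;
 *		long *value;
 *
 *		value = bpf_map_lookup_elem(&my_map, &key);
 *		if (value)	// mandatory NULL check
 *			__sync_fetch_and_add(value, 1);
 *		return 0;
 *	}
 */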

static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
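	/* as with lookup, the verifier checked R1 (map pointer) and R2 (key on
	 * the program stack); R3 points to map->value_size initialized bytes
	 * and R4 carries the update flags (BPF_ANY, BPF_NOEXIST or BPF_EXIST),
	 * which are passed straight through to the map implementation
	 */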
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	void *key = (void *) (unsigned long) r2;
	void *value = (void *) (unsigned long) r3;

	WARN_ON_ONCE(!rcu_read_lock_held());

	return map->ops->map_update_elem(map, key, value, r4);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func = bpf_map_update_elem,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
	.arg3_type = ARG_PTR_TO_MAP_VALUE,
	.arg4_type = ARG_ANYTHING,
};

static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
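	/* R1 and R2 were verified like in lookup; the map implementation
	 * returns 0 on success or a negative errno (e.g. -ENOENT when the key
	 * is not present in a hash map)
	 */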
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	void *key = (void *) (unsigned long) r2;

	WARN_ON_ONCE(!rcu_read_lock_held());

	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func = bpf_map_delete_elem,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
};
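
/*
 * Illustrative sketch, not part of this file: a subsystem that wants its
 * eBPF programs to use the map helpers above would return these protos from
 * its verifier_ops->get_func_proto() callback, as described in the comment
 * above bpf_map_lookup_elem(). The function name below is hypothetical; real
 * implementations live in the callers (e.g. the socket filter code).
 */
static const struct bpf_func_proto *example_get_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	default:
		/* unknown helper: the verifier will reject the program */
		return NULL;
	}
}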

static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
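	/* pseudo-random number from the kernel's prandom generator; fast, but
	 * not cryptographically secure
	 */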
	return prandom_u32();
}

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_get_prandom_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

static u64 bpf_get_smp_processor_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
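	/* raw_smp_processor_id() avoids the preemption-safety check that
	 * smp_processor_id() performs in debug builds
	 */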
	return raw_smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};