xref: /linux/kernel/bpf/helpers.c (revision 81fa7a69c2174ed8de314b9c231ef30a8718e5e1)
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>

/* If a kernel subsystem allows eBPF programs to call this function, it
 * should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback, so that the verifier can
 * properly check the arguments.
 *
 * Different map implementations rely on RCU in their lookup/update/delete
 * map methods, so eBPF programs must run under the RCU read lock whenever
 * they are allowed to access maps; hence the rcu_read_lock_held() check in
 * all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

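/* Illustrative sketch, not part of this file: a subsystem typically exposes
 * the proto above from its verifier_ops->get_func_proto() callback. The
 * callback name below is an assumption for illustration.
 *
 *	static const struct bpf_func_proto *
 *	example_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		default:
 *			return NULL;
 *		}
 *	}
 */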
BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

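/* From the program side, the flags argument selects the update policy:
 * BPF_ANY, BPF_NOEXIST or BPF_EXIST (see include/uapi/linux/bpf.h). A
 * minimal sketch of a call from a BPF program; the map name is
 * hypothetical:
 *
 *	u32 key = 0;
 *	u64 val = 42;
 *
 *	bpf_map_update_elem(&example_map, &key, &val, BPF_ANY);
 */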
BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

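/* push/pop/peek back the keyless BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK
 * maps. A minimal program-side sketch; the map name is hypothetical:
 *
 *	u64 val = 1;
 *
 *	bpf_map_push_elem(&example_queue, &val, 0);
 *	// passing BPF_EXIST instead would replace the oldest element when full
 *	if (bpf_map_peek_elem(&example_queue, &val) == 0)
 *		;	// val holds the head element, still in the map
 *	if (bpf_map_pop_elem(&example_queue, &val) == 0)
 *		;	// val holds the head element, now removed
 */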
const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

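/* Note .gpl_only above: only GPL-compatible programs may call this helper.
 * A common tracing-side pattern, sketched: take a timestamp on entry and
 * compute a delta on exit.
 *
 *	u64 start = bpf_ktime_get_ns();
 *	// ... traced work ...
 *	u64 delta_ns = bpf_ktime_get_ns() - start;
 */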
BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

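/* The return value packs both ids into one u64; a program-side caller
 * unpacks it like this (the kernel's tgid is the userspace PID, the
 * kernel's pid is the userspace TID):
 *
 *	u64 id = bpf_get_current_pid_tgid();
 *	u32 tgid = id >> 32;
 *	u32 pid = (u32)id;
 */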
BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

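/* Same packing scheme as above, with the gid in the upper 32 bits:
 *
 *	u64 ids = bpf_get_current_uid_gid();
 *	u32 gid = ids >> 32;
 *	u32 uid = (u32)ids;
 */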
BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

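/* Program-side sketch: a buffer of TASK_COMM_LEN (16) bytes is enough for
 * any task->comm, and the helper zeroes the buffer on failure:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	if (bpf_get_current_comm(comm, sizeof(comm)) == 0)
 *		;	// comm is %NUL-terminated here
 */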
#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);

	return cgrp->kn->id.id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* The flags argument is unused for now, but provides the ability
	 * to extend the API. The verifier checks that its value is valid.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage;
	void *ptr;

	storage = this_cpu_read(bpf_cgroup_storage[stype]);

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
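/* Program-side sketch, assuming a BPF_MAP_TYPE_CGROUP_STORAGE map named
 * example_storage attached together with the program; the verifier only
 * accepts 0 for the flags argument today:
 *
 *	long *counter = bpf_get_local_storage(&example_storage, 0);
 *
 *	__sync_fetch_and_add(counter, 1);
 */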
#endif /* CONFIG_CGROUP_BPF */
#endif /* CONFIG_CGROUPS */