xref: /linux/kernel/bpf/helpers.c (revision 9ee0034b8f49aaaa7e7c2da8db1038915db99c19)
1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2  *
3  * This program is free software; you can redistribute it and/or
4  * modify it under the terms of version 2 of the GNU General Public
5  * License as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful, but
8  * WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10  * General Public License for more details.
11  */
12 #include <linux/bpf.h>
13 #include <linux/rcupdate.h>
14 #include <linux/random.h>
15 #include <linux/smp.h>
16 #include <linux/ktime.h>
17 #include <linux/sched.h>
18 #include <linux/uidgid.h>
19 #include <linux/filter.h>
20 
/* If a kernel subsystem allows eBPF programs to call this function, its
 * verifier_ops->get_func_proto() callback should return
 * bpf_map_lookup_elem_proto, so that the verifier can properly check the
 * arguments.
 *
 * Different map implementations rely on RCU in the map methods
 * lookup/update/delete; therefore eBPF programs must run under an RCU read
 * lock if they are allowed to access maps, so rcu_read_lock_held() is
 * checked in all three functions.
 */
/* bpf_map_lookup_elem() - look up @key in @map.
 *
 * Must run under rcu_read_lock(); map implementations rely on RCU for
 * their lookup methods (see the comment block above).
 *
 * Returns a pointer to the map value, or NULL if @key is not present.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	/* BPF helpers return u64; the pointer result (possibly NULL) is
	 * funneled through unsigned long, matching the
	 * RET_PTR_TO_MAP_VALUE_OR_NULL return type declared in the proto.
	 */
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}
35 
/* Verifier-visible prototype for bpf_map_lookup_elem():
 * usable from non-GPL programs; arg1 must be a constant map pointer,
 * arg2 a pointer to a map key, and the result is a map-value pointer
 * that may be NULL (the program must NULL-check before dereferencing).
 */
const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
43 
/* bpf_map_update_elem() - insert or update the entry for @key in @map.
 *
 * Must run under rcu_read_lock() (see comment block above).
 * @flags is passed through to the map implementation
 * (e.g. BPF_ANY/BPF_NOEXIST/BPF_EXIST — semantics are map-specific).
 *
 * Returns 0 on success or a negative errno from the map implementation.
 */
BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}
50 
/* Verifier-visible prototype for bpf_map_update_elem():
 * usable from non-GPL programs; takes a constant map pointer, pointers
 * to key and value buffers, and an arbitrary u64 flags word.
 */
const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};
60 
/* bpf_map_delete_elem() - remove the entry for @key from @map.
 *
 * Must run under rcu_read_lock() (see comment block above).
 *
 * Returns 0 on success or a negative errno from the map implementation.
 */
BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}
66 
/* Verifier-visible prototype for bpf_map_delete_elem():
 * usable from non-GPL programs; constant map pointer plus key pointer.
 */
const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
74 
/* Verifier-visible prototype for the bpf_get_prandom_u32() helper.
 * The implementation, bpf_user_rnd_u32(), lives elsewhere (not in this
 * file); it takes no arguments and returns a pseudo-random u32.
 */
const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
80 
/* bpf_get_smp_processor_id() - return the CPU the program is running on.
 *
 * NOTE(review): smp_processor_id() assumes the caller cannot migrate;
 * presumably BPF programs run with preemption disabled — confirm against
 * the program-invocation sites.
 */
BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}
85 
/* Verifier-visible prototype for bpf_get_smp_processor_id():
 * no arguments, integer result, usable from non-GPL programs.
 */
const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
91 
/* bpf_ktime_get_ns() - current CLOCK_MONOTONIC time in nanoseconds.
 *
 * Uses the NMI-safe fast accessor so the helper is callable from any
 * context BPF programs may run in (including NMI, e.g. perf events).
 */
BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}
97 
/* Verifier-visible prototype for bpf_ktime_get_ns().
 * gpl_only = true: this helper is restricted to GPL-compatible programs.
 */
const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};
103 
104 BPF_CALL_0(bpf_get_current_pid_tgid)
105 {
106 	struct task_struct *task = current;
107 
108 	if (unlikely(!task))
109 		return -EINVAL;
110 
111 	return (u64) task->tgid << 32 | task->pid;
112 }
113 
/* Verifier-visible prototype for bpf_get_current_pid_tgid():
 * no arguments, integer result, usable from non-GPL programs.
 */
const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
119 
120 BPF_CALL_0(bpf_get_current_uid_gid)
121 {
122 	struct task_struct *task = current;
123 	kuid_t uid;
124 	kgid_t gid;
125 
126 	if (unlikely(!task))
127 		return -EINVAL;
128 
129 	current_uid_gid(&uid, &gid);
130 	return (u64) from_kgid(&init_user_ns, gid) << 32 |
131 		     from_kuid(&init_user_ns, uid);
132 }
133 
/* Verifier-visible prototype for bpf_get_current_uid_gid():
 * no arguments, integer result, usable from non-GPL programs.
 */
const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
139 
140 BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
141 {
142 	struct task_struct *task = current;
143 
144 	if (unlikely(!task))
145 		goto err_clear;
146 
147 	strncpy(buf, task->comm, size);
148 
149 	/* Verifier guarantees that size > 0. For task->comm exceeding
150 	 * size, guarantee that buf is %NUL-terminated. Unconditionally
151 	 * done here to save the size test.
152 	 */
153 	buf[size - 1] = 0;
154 	return 0;
155 err_clear:
156 	memset(buf, 0, size);
157 	return -EINVAL;
158 }
159 
/* Verifier-visible prototype for bpf_get_current_comm():
 * arg1 is a pointer into the program's stack used purely as an output
 * buffer (raw stack), arg2 the constant size of that buffer.
 */
const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_RAW_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
};
167