xref: /linux/kernel/bpf/token.c (revision 9d027a35a52a4ea9400390ef4414e4e9dcd54193)
#include <linux/bpf.h>
#include <linux/vmalloc.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/namei.h>
#include <linux/user_namespace.h>
#include <linux/security.h>

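/*
 * Check whether @cap (or CAP_SYS_ADMIN, which implies every other
 * capability) is available, preferring a token-based ns_capable() check,
 * gated by the bpf_token_capable LSM hook, over a plain capable() one.
 */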
bool bpf_token_capable(const struct bpf_token *token, int cap)
{
	/* BPF token allows ns_capable() level of capabilities, but only if
	 * the token's userns is *exactly* the same as the current task's
	 * userns
	 */
	if (token && current_user_ns() == token->userns) {
		if (ns_capable(token->userns, cap) ||
		    (cap != CAP_SYS_ADMIN && ns_capable(token->userns, CAP_SYS_ADMIN)))
			return security_bpf_token_capable(token, cap) == 0;
	}
	/* otherwise fall back to plain capable() checks */
	return capable(cap) || (cap != CAP_SYS_ADMIN && capable(CAP_SYS_ADMIN));
}

void bpf_token_inc(struct bpf_token *token)
{
	atomic64_inc(&token->refcnt);
}

static void bpf_token_free(struct bpf_token *token)
{
	security_bpf_token_free(token);
	put_user_ns(token->userns);
	kvfree(token);
}

static void bpf_token_put_deferred(struct work_struct *work)
{
	struct bpf_token *token = container_of(work, struct bpf_token, work);

	bpf_token_free(token);
}

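/*
 * Drop a reference to @token. The final put frees the token from a
 * workqueue rather than inline, presumably so that bpf_token_put() stays
 * safe to call from contexts where the LSM free hook and put_user_ns()
 * could not run directly.
 */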
void bpf_token_put(struct bpf_token *token)
{
	if (!token)
		return;

	if (!atomic64_dec_and_test(&token->refcnt))
		return;

	INIT_WORK(&token->work, bpf_token_put_deferred);
	schedule_work(&token->work);
}

static int bpf_token_release(struct inode *inode, struct file *filp)
{
	struct bpf_token *token = filp->private_data;

	bpf_token_put(token);
	return 0;
}

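/*
 * Render the token's allowed_* masks in /proc/<pid>/fdinfo/<fd>; a
 * fully-set mask is reported as "any" instead of a raw hex value.
 * Illustrative output (the actual values depend on the bpffs mount's
 * delegate_* options):
 *
 *	allowed_cmds:	any
 *	allowed_maps:	0x8
 *	allowed_progs:	any
 *	allowed_attachs:	any
 */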
static void bpf_token_show_fdinfo(struct seq_file *m, struct file *filp)
{
	struct bpf_token *token = filp->private_data;
	u64 mask;

	BUILD_BUG_ON(__MAX_BPF_CMD >= 64);
	mask = (1ULL << __MAX_BPF_CMD) - 1;
	if ((token->allowed_cmds & mask) == mask)
		seq_puts(m, "allowed_cmds:\tany\n");
	else
		seq_printf(m, "allowed_cmds:\t0x%llx\n", token->allowed_cmds);

	BUILD_BUG_ON(__MAX_BPF_MAP_TYPE >= 64);
	mask = (1ULL << __MAX_BPF_MAP_TYPE) - 1;
	if ((token->allowed_maps & mask) == mask)
		seq_puts(m, "allowed_maps:\tany\n");
	else
		seq_printf(m, "allowed_maps:\t0x%llx\n", token->allowed_maps);

	BUILD_BUG_ON(__MAX_BPF_PROG_TYPE >= 64);
	mask = (1ULL << __MAX_BPF_PROG_TYPE) - 1;
	if ((token->allowed_progs & mask) == mask)
		seq_puts(m, "allowed_progs:\tany\n");
	else
		seq_printf(m, "allowed_progs:\t0x%llx\n", token->allowed_progs);

	BUILD_BUG_ON(__MAX_BPF_ATTACH_TYPE >= 64);
	mask = (1ULL << __MAX_BPF_ATTACH_TYPE) - 1;
	if ((token->allowed_attachs & mask) == mask)
		seq_puts(m, "allowed_attachs:\tany\n");
	else
		seq_printf(m, "allowed_attachs:\t0x%llx\n", token->allowed_attachs);
}

#define BPF_TOKEN_INODE_NAME "bpf-token"

static const struct inode_operations bpf_token_iops = { };

static const struct file_operations bpf_token_fops = {
	.release	= bpf_token_release,
	.show_fdinfo	= bpf_token_show_fdinfo,
};

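/*
 * bpf_token_create - implementation of the BPF_TOKEN_CREATE command.
 *
 * Accepts an FD for the root of a bpffs instance that is owned by the
 * caller's userns and has at least one delegate_* mount option set, and
 * returns an O_CLOEXEC token FD backed by an anonymous, unlinked bpffs
 * inode. A minimal user-space sketch, assuming a bpffs mount at
 * /sys/fs/bpf (the path and the missing error handling are illustrative,
 * not part of this file):
 *
 *	union bpf_attr attr = {};
 *	int token_fd;
 *
 *	attr.token_create.bpffs_fd = open("/sys/fs/bpf", O_RDONLY);
 *	token_fd = syscall(__NR_bpf, BPF_TOKEN_CREATE, &attr, sizeof(attr));
 */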
int bpf_token_create(union bpf_attr *attr)
{
	struct bpf_mount_opts *mnt_opts;
	struct bpf_token *token = NULL;
	struct user_namespace *userns;
	struct inode *inode;
	struct file *file;
	struct path path;
	struct fd f;
	umode_t mode;
	int err, fd;

	f = fdget(attr->token_create.bpffs_fd);
	if (!f.file)
		return -EBADF;

	path = f.file->f_path;
	path_get(&path);
	fdput(f);

	if (path.dentry != path.mnt->mnt_sb->s_root) {
		err = -EINVAL;
		goto out_path;
	}
	if (path.mnt->mnt_sb->s_op != &bpf_super_ops) {
		err = -EINVAL;
		goto out_path;
	}
	err = path_permission(&path, MAY_ACCESS);
	if (err)
		goto out_path;

	userns = path.dentry->d_sb->s_user_ns;
	/*
	 * Enforce that creators of BPF tokens are in the same user
	 * namespace as the BPF FS instance. This makes reasoning about
	 * permissions a lot easier and we can always relax this later.
	 */
	if (current_user_ns() != userns) {
		err = -EPERM;
		goto out_path;
	}
	if (!ns_capable(userns, CAP_BPF)) {
		err = -EPERM;
		goto out_path;
	}

	mnt_opts = path.dentry->d_sb->s_fs_info;
	if (mnt_opts->delegate_cmds == 0 &&
	    mnt_opts->delegate_maps == 0 &&
	    mnt_opts->delegate_progs == 0 &&
	    mnt_opts->delegate_attachs == 0) {
		err = -ENOENT; /* no BPF token delegation is set up */
		goto out_path;
	}

	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
	inode = bpf_get_inode(path.mnt->mnt_sb, NULL, mode);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_path;
	}

	inode->i_op = &bpf_token_iops;
	inode->i_fop = &bpf_token_fops;
	clear_nlink(inode); /* make sure it is unlinked */

	file = alloc_file_pseudo(inode, path.mnt, BPF_TOKEN_INODE_NAME, O_RDWR, &bpf_token_fops);
	if (IS_ERR(file)) {
		iput(inode);
		err = PTR_ERR(file);
		goto out_path;
	}

	token = kvzalloc(sizeof(*token), GFP_USER);
	if (!token) {
		err = -ENOMEM;
		goto out_file;
	}

	atomic64_set(&token->refcnt, 1);

	/* remember bpffs owning userns for future ns_capable() checks */
	token->userns = get_user_ns(userns);

	token->allowed_cmds = mnt_opts->delegate_cmds;
	token->allowed_maps = mnt_opts->delegate_maps;
	token->allowed_progs = mnt_opts->delegate_progs;
	token->allowed_attachs = mnt_opts->delegate_attachs;

	err = security_bpf_token_create(token, attr, &path);
	if (err)
		goto out_token;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		err = fd;
		goto out_token;
	}

	file->private_data = token;
	fd_install(fd, file);

	path_put(&path);
	return fd;

out_token:
	bpf_token_free(token);
out_file:
	fput(file);
out_path:
	path_put(&path);
	return err;
}

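/*
 * Resolve a token FD (e.g. one passed via a *_token_fd attribute of
 * another bpf() command) into a referenced struct bpf_token; the f_op
 * comparison rejects any FD that is not a BPF token.
 */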
struct bpf_token *bpf_token_get_from_fd(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_token *token;

	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_token_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	token = f.file->private_data;
	bpf_token_inc(token);
	fdput(f);

	return token;
}

bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
{
	/* A BPF token can be used only within exactly the same userns in
	 * which it was created
	 */
	if (!token || current_user_ns() != token->userns)
		return false;
	if (!(token->allowed_cmds & (1ULL << cmd)))
		return false;
	return security_bpf_token_cmd(token, cmd) == 0;
}

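/*
 * The type checks below are pure bit-mask tests; callers are expected to
 * have already validated the command itself (and thus the userns and LSM
 * constraints) via bpf_token_allow_cmd().
 */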
bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type)
{
	if (!token || type >= __MAX_BPF_MAP_TYPE)
		return false;

	return token->allowed_maps & (1ULL << type);
}

bool bpf_token_allow_prog_type(const struct bpf_token *token,
			       enum bpf_prog_type prog_type,
			       enum bpf_attach_type attach_type)
{
	if (!token || prog_type >= __MAX_BPF_PROG_TYPE || attach_type >= __MAX_BPF_ATTACH_TYPE)
		return false;

	return (token->allowed_progs & (1ULL << prog_type)) &&
	       (token->allowed_attachs & (1ULL << attach_type));
}