/* xref: /linux/kernel/bpf/token.c (revision 72bea132f3680ee51e7ed2cee62892b6f5121909) */
#include <linux/bpf.h>
#include <linux/vmalloc.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/namei.h>
#include <linux/user_namespace.h>
#include <linux/security.h>

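/*
 * ns_capable() check with CAP_SYS_ADMIN acting as a superset: a task that
 * holds CAP_SYS_ADMIN in @ns passes the check for any other requested
 * capability (e.g. CAP_BPF or CAP_PERFMON) as well.
 */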
static bool bpf_ns_capable(struct user_namespace *ns, int cap)
{
	return ns_capable(ns, cap) || (cap != CAP_SYS_ADMIN && ns_capable(ns, CAP_SYS_ADMIN));
}

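/*
 * Check whether the current task may use capability @cap in the user
 * namespace associated with @token (or in init_user_ns if @token is NULL).
 * When a token is supplied, LSMs also get a chance to veto the access via
 * the bpf_token_capable security hook.
 */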
bool bpf_token_capable(const struct bpf_token *token, int cap)
{
	struct user_namespace *userns;

	/* BPF token allows ns_capable() level of capabilities */
	userns = token ? token->userns : &init_user_ns;
	if (!bpf_ns_capable(userns, cap))
		return false;
	if (token && security_bpf_token_capable(token, cap) < 0)
		return false;
	return true;
}

void bpf_token_inc(struct bpf_token *token)
{
	atomic64_inc(&token->refcnt);
}

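/*
 * Final teardown of a token: let LSMs release any state they attached at
 * creation time, drop the reference on the owning user namespace, and free
 * the token itself. Reached either through the deferred work below once the
 * refcount hits zero, or directly on a failed bpf_token_create().
 */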
static void bpf_token_free(struct bpf_token *token)
{
	security_bpf_token_free(token);
	put_user_ns(token->userns);
	kfree(token);
}

static void bpf_token_put_deferred(struct work_struct *work)
{
	struct bpf_token *token = container_of(work, struct bpf_token, work);

	bpf_token_free(token);
}

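/*
 * Drop a reference to @token. The last put does not free the token
 * synchronously; bpf_token_free() is deferred to the system workqueue so
 * that the final cleanup always runs in process context, independent of the
 * context in which the last reference is dropped.
 */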
void bpf_token_put(struct bpf_token *token)
{
	if (!token)
		return;

	if (!atomic64_dec_and_test(&token->refcnt))
		return;

	INIT_WORK(&token->work, bpf_token_put_deferred);
	schedule_work(&token->work);
}

static int bpf_token_release(struct inode *inode, struct file *filp)
{
	struct bpf_token *token = filp->private_data;

	bpf_token_put(token);
	return 0;
}

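/*
 * Back /proc/<pid>/fdinfo/<fd> for a token FD: print each allowed_* bit
 * mask, collapsing a fully populated mask into "any". Illustrative output
 * for a token that delegates all commands but only some map types might
 * look like (exact hex values depend on the bpffs delegate_* mount options):
 *
 *	allowed_cmds:	any
 *	allowed_maps:	0x30
 *	allowed_progs:	any
 *	allowed_attachs:	any
 */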
static void bpf_token_show_fdinfo(struct seq_file *m, struct file *filp)
{
	struct bpf_token *token = filp->private_data;
	u64 mask;

	BUILD_BUG_ON(__MAX_BPF_CMD >= 64);
	mask = BIT_ULL(__MAX_BPF_CMD) - 1;
	if ((token->allowed_cmds & mask) == mask)
		seq_printf(m, "allowed_cmds:\tany\n");
	else
		seq_printf(m, "allowed_cmds:\t0x%llx\n", token->allowed_cmds);

	BUILD_BUG_ON(__MAX_BPF_MAP_TYPE >= 64);
	mask = BIT_ULL(__MAX_BPF_MAP_TYPE) - 1;
	if ((token->allowed_maps & mask) == mask)
		seq_printf(m, "allowed_maps:\tany\n");
	else
		seq_printf(m, "allowed_maps:\t0x%llx\n", token->allowed_maps);

	BUILD_BUG_ON(__MAX_BPF_PROG_TYPE >= 64);
	mask = BIT_ULL(__MAX_BPF_PROG_TYPE) - 1;
	if ((token->allowed_progs & mask) == mask)
		seq_printf(m, "allowed_progs:\tany\n");
	else
		seq_printf(m, "allowed_progs:\t0x%llx\n", token->allowed_progs);

	BUILD_BUG_ON(__MAX_BPF_ATTACH_TYPE >= 64);
	mask = BIT_ULL(__MAX_BPF_ATTACH_TYPE) - 1;
	if ((token->allowed_attachs & mask) == mask)
		seq_printf(m, "allowed_attachs:\tany\n");
	else
		seq_printf(m, "allowed_attachs:\t0x%llx\n", token->allowed_attachs);
}

#define BPF_TOKEN_INODE_NAME "bpf-token"

static const struct inode_operations bpf_token_iops = { };

static const struct file_operations bpf_token_fops = {
	.release	= bpf_token_release,
	.show_fdinfo	= bpf_token_show_fdinfo,
};

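/*
 * BPF_TOKEN_CREATE: derive a BPF token from a BPF FS instance.
 *
 * attr->token_create.bpffs_fd must refer to the root of a bpffs mount whose
 * owning user namespace matches the caller's (non-init) user namespace, in
 * which the caller must hold CAP_BPF. At least one delegate_* mount option
 * has to be set on that instance, otherwise -ENOENT is returned. On success
 * an O_CLOEXEC FD backed by an anonymous, unlinked bpffs inode is returned;
 * the token inherits the instance's delegate_cmds/maps/progs/attachs masks.
 *
 * Illustrative userspace usage (a sketch, not part of this file; it assumes
 * only the bpf() syscall and the delegate_* bpffs mount options consumed
 * above, with a bpffs instance set up for the workload's user namespace,
 * e.g. mount -t bpf -o delegate_cmds=any,delegate_maps=any bpffs <dir>):
 *
 *	int bpffs_fd, token_fd;
 *	union bpf_attr attr = {};
 *
 *	bpffs_fd = open("/sys/fs/bpf/delegated", O_RDONLY);
 *	attr.token_create.bpffs_fd = bpffs_fd;
 *	token_fd = syscall(__NR_bpf, BPF_TOKEN_CREATE, &attr, sizeof(attr));
 */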
int bpf_token_create(union bpf_attr *attr)
{
	struct bpf_mount_opts *mnt_opts;
	struct bpf_token *token = NULL;
	struct user_namespace *userns;
	struct inode *inode;
	struct file *file;
	struct path path;
	struct fd f;
	umode_t mode;
	int err, fd;

	f = fdget(attr->token_create.bpffs_fd);
	if (!f.file)
		return -EBADF;

	path = f.file->f_path;
	path_get(&path);
	fdput(f);

	if (path.dentry != path.mnt->mnt_sb->s_root) {
		err = -EINVAL;
		goto out_path;
	}
	if (path.mnt->mnt_sb->s_op != &bpf_super_ops) {
		err = -EINVAL;
		goto out_path;
	}
	err = path_permission(&path, MAY_ACCESS);
	if (err)
		goto out_path;

	userns = path.dentry->d_sb->s_user_ns;
	/*
	 * Enforce that creators of BPF tokens are in the same user
	 * namespace as the BPF FS instance. This makes reasoning about
	 * permissions a lot easier and we can always relax this later.
	 */
	if (current_user_ns() != userns) {
		err = -EPERM;
		goto out_path;
	}
	if (!ns_capable(userns, CAP_BPF)) {
		err = -EPERM;
		goto out_path;
	}

	/* Creating BPF token in init_user_ns doesn't make much sense. */
	if (current_user_ns() == &init_user_ns) {
		err = -EOPNOTSUPP;
		goto out_path;
	}

	mnt_opts = path.dentry->d_sb->s_fs_info;
	if (mnt_opts->delegate_cmds == 0 &&
	    mnt_opts->delegate_maps == 0 &&
	    mnt_opts->delegate_progs == 0 &&
	    mnt_opts->delegate_attachs == 0) {
		err = -ENOENT; /* no BPF token delegation is set up */
		goto out_path;
	}

	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
	inode = bpf_get_inode(path.mnt->mnt_sb, NULL, mode);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_path;
	}

	inode->i_op = &bpf_token_iops;
	inode->i_fop = &bpf_token_fops;
	clear_nlink(inode); /* make sure it is unlinked */

	file = alloc_file_pseudo(inode, path.mnt, BPF_TOKEN_INODE_NAME, O_RDWR, &bpf_token_fops);
	if (IS_ERR(file)) {
		iput(inode);
		err = PTR_ERR(file);
		goto out_path;
	}

	token = kzalloc(sizeof(*token), GFP_USER);
	if (!token) {
		err = -ENOMEM;
		goto out_file;
	}

	atomic64_set(&token->refcnt, 1);

	/* remember bpffs owning userns for future ns_capable() checks */
	token->userns = get_user_ns(userns);

	token->allowed_cmds = mnt_opts->delegate_cmds;
	token->allowed_maps = mnt_opts->delegate_maps;
	token->allowed_progs = mnt_opts->delegate_progs;
	token->allowed_attachs = mnt_opts->delegate_attachs;

	err = security_bpf_token_create(token, attr, &path);
	if (err)
		goto out_token;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		err = fd;
		goto out_token;
	}

	file->private_data = token;
	fd_install(fd, file);

	path_put(&path);
	return fd;

out_token:
	bpf_token_free(token);
out_file:
	fput(file);
out_path:
	path_put(&path);
	return err;
}

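/*
 * Resolve a user-supplied FD into a bpf_token reference. The FD must have
 * been produced by BPF_TOKEN_CREATE (i.e. be backed by bpf_token_fops);
 * anything else yields -EINVAL. On success the refcount is bumped and the
 * caller owns a reference that must be released with bpf_token_put().
 */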
struct bpf_token *bpf_token_get_from_fd(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_token *token;

	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_token_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	token = f.file->private_data;
	bpf_token_inc(token);
	fdput(f);

	return token;
}

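/*
 * The bpf_token_allow_*() helpers below answer "does this token delegate X?"
 * by testing the corresponding allowed_* bit. For commands, LSMs additionally
 * get a say through the bpf_token_cmd security hook. A NULL token never
 * allows anything.
 */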
bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
{
	if (!token)
		return false;
	if (!(token->allowed_cmds & BIT_ULL(cmd)))
		return false;
	return security_bpf_token_cmd(token, cmd) == 0;
}

bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type)
{
	if (!token || type >= __MAX_BPF_MAP_TYPE)
		return false;

	return token->allowed_maps & BIT_ULL(type);
}

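/*
 * Program loading is delegated only if both the program type and the
 * expected attach type bits are set in the token's masks.
 */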
bool bpf_token_allow_prog_type(const struct bpf_token *token,
			       enum bpf_prog_type prog_type,
			       enum bpf_attach_type attach_type)
{
	if (!token || prog_type >= __MAX_BPF_PROG_TYPE || attach_type >= __MAX_BPF_ATTACH_TYPE)
		return false;

	return (token->allowed_progs & BIT_ULL(prog_type)) &&
	       (token->allowed_attachs & BIT_ULL(attach_type));
}
279