xref: /linux/kernel/bpf/token.c (revision 7fc2cd2e4b398c57c9cf961cfea05eadbf34c05c)
1 #include <linux/bpf.h>
2 #include <linux/vmalloc.h>
3 #include <linux/file.h>
4 #include <linux/fs.h>
5 #include <linux/kernel.h>
6 #include <linux/idr.h>
7 #include <linux/namei.h>
8 #include <linux/user_namespace.h>
9 #include <linux/security.h>
10 
11 static bool bpf_ns_capable(struct user_namespace *ns, int cap)
12 {
13 	return ns_capable(ns, cap) || (cap != CAP_SYS_ADMIN && ns_capable(ns, CAP_SYS_ADMIN));
14 }
15 
16 bool bpf_token_capable(const struct bpf_token *token, int cap)
17 {
18 	struct user_namespace *userns;
19 
20 	/* BPF token allows ns_capable() level of capabilities */
21 	userns = token ? token->userns : &init_user_ns;
22 	if (!bpf_ns_capable(userns, cap))
23 		return false;
24 	if (token && security_bpf_token_capable(token, cap) < 0)
25 		return false;
26 	return true;
27 }
28 
/* Take an extra reference on @token; paired with bpf_token_put(). */
void bpf_token_inc(struct bpf_token *token)
{
	atomic64_inc(&token->refcnt);
}
33 
/* Final teardown once the last reference is gone: let the LSM release
 * its per-token state, drop the userns reference taken at creation,
 * then free the token itself.
 */
static void bpf_token_free(struct bpf_token *token)
{
	security_bpf_token_free(token);
	put_user_ns(token->userns);
	kfree(token);
}
40 
/* Workqueue trampoline: recover the token embedding @work and free it. */
static void bpf_token_put_deferred(struct work_struct *work)
{
	struct bpf_token *token = container_of(work, struct bpf_token, work);

	bpf_token_free(token);
}
47 
/* Drop a reference on @token (NULL is a no-op).  The actual free is
 * punted to a workqueue rather than done inline — NOTE(review):
 * presumably because the last put can happen from contexts where the
 * LSM free hook / put_user_ns() are not safe to call; confirm against
 * callers.
 */
void bpf_token_put(struct bpf_token *token)
{
	if (!token)
		return;

	if (!atomic64_dec_and_test(&token->refcnt))
		return;

	/* last reference: token->work is safe to reuse now */
	INIT_WORK(&token->work, bpf_token_put_deferred);
	schedule_work(&token->work);
}
59 
60 static int bpf_token_release(struct inode *inode, struct file *filp)
61 {
62 	struct bpf_token *token = filp->private_data;
63 
64 	bpf_token_put(token);
65 	return 0;
66 }
67 
68 static void bpf_token_show_fdinfo(struct seq_file *m, struct file *filp)
69 {
70 	struct bpf_token *token = filp->private_data;
71 	u64 mask;
72 
73 	BUILD_BUG_ON(__MAX_BPF_CMD >= 64);
74 	mask = BIT_ULL(__MAX_BPF_CMD) - 1;
75 	if ((token->allowed_cmds & mask) == mask)
76 		seq_printf(m, "allowed_cmds:\tany\n");
77 	else
78 		seq_printf(m, "allowed_cmds:\t0x%llx\n", token->allowed_cmds);
79 
80 	BUILD_BUG_ON(__MAX_BPF_MAP_TYPE >= 64);
81 	mask = BIT_ULL(__MAX_BPF_MAP_TYPE) - 1;
82 	if ((token->allowed_maps & mask) == mask)
83 		seq_printf(m, "allowed_maps:\tany\n");
84 	else
85 		seq_printf(m, "allowed_maps:\t0x%llx\n", token->allowed_maps);
86 
87 	BUILD_BUG_ON(__MAX_BPF_PROG_TYPE >= 64);
88 	mask = BIT_ULL(__MAX_BPF_PROG_TYPE) - 1;
89 	if ((token->allowed_progs & mask) == mask)
90 		seq_printf(m, "allowed_progs:\tany\n");
91 	else
92 		seq_printf(m, "allowed_progs:\t0x%llx\n", token->allowed_progs);
93 
94 	BUILD_BUG_ON(__MAX_BPF_ATTACH_TYPE >= 64);
95 	mask = BIT_ULL(__MAX_BPF_ATTACH_TYPE) - 1;
96 	if ((token->allowed_attachs & mask) == mask)
97 		seq_printf(m, "allowed_attachs:\tany\n");
98 	else
99 		seq_printf(m, "allowed_attachs:\t0x%llx\n", token->allowed_attachs);
100 }
101 
/* dentry name for the anonymous pseudo-file backing a token FD */
#define BPF_TOKEN_INODE_NAME "bpf-token"

/* token inodes expose no inode operations */
static const struct inode_operations bpf_token_iops = { };

/* token FDs support only release and fdinfo; f_op identity is also used
 * by bpf_token_get_from_fd() to recognize token files */
const struct file_operations bpf_token_fops = {
	.release	= bpf_token_release,
	.show_fdinfo	= bpf_token_show_fdinfo,
};
110 
/* BPF_TOKEN_CREATE command: mint a BPF token FD from an open FD to the
 * root of a BPF FS instance mounted with delegation options.  Returns
 * the new FD number on success, negative errno on failure.
 *
 * Cleanup is scoped: @f is auto-put via CLASS(fd), @token is auto-kfreed
 * via __free(kfree) unless ownership is transferred with no_free_ptr(),
 * and the prepared FD/file pair is torn down unless fd_publish() runs.
 */
int bpf_token_create(union bpf_attr *attr)
{
	struct bpf_token *token __free(kfree) = NULL;
	struct bpf_mount_opts *mnt_opts;
	struct user_namespace *userns;
	struct inode *inode;
	CLASS(fd, f)(attr->token_create.bpffs_fd);
	struct path path;
	struct super_block *sb;
	umode_t mode;
	int err;

	if (fd_empty(f))
		return -EBADF;

	path = fd_file(f)->f_path;
	sb = path.dentry->d_sb;

	/* the FD must reference the root of a BPF FS superblock */
	if (path.dentry != sb->s_root)
		return -EINVAL;
	if (sb->s_op != &bpf_super_ops)
		return -EINVAL;
	err = path_permission(&path, MAY_ACCESS);
	if (err)
		return err;

	userns = sb->s_user_ns;
	/*
	 * Enforce that creators of BPF tokens are in the same user
	 * namespace as the BPF FS instance. This makes reasoning about
	 * permissions a lot easier and we can always relax this later.
	 */
	if (current_user_ns() != userns)
		return -EPERM;
	if (!ns_capable(userns, CAP_BPF))
		return -EPERM;

	/* Creating BPF token in init_user_ns doesn't make much sense. */
	if (current_user_ns() == &init_user_ns)
		return -EOPNOTSUPP;

	mnt_opts = sb->s_fs_info;
	if (mnt_opts->delegate_cmds == 0 &&
	    mnt_opts->delegate_maps == 0 &&
	    mnt_opts->delegate_progs == 0 &&
	    mnt_opts->delegate_attachs == 0)
		return -ENOENT; /* no BPF token delegation is set up */

	/* owner-only regular file, further restricted by the umask */
	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
	inode = bpf_get_inode(sb, NULL, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &bpf_token_iops;
	inode->i_fop = &bpf_token_fops;
	clear_nlink(inode); /* make sure it is unlinked */

	/* reserve an FD and a file; both are undone on early return */
	FD_PREPARE(fdf, O_CLOEXEC,
		   alloc_file_pseudo(inode, path.mnt, BPF_TOKEN_INODE_NAME,
				     O_RDWR, &bpf_token_fops));
	if (fdf.err)
		return fdf.err;

	token = kzalloc(sizeof(*token), GFP_USER);
	if (!token)
		return -ENOMEM;

	atomic64_set(&token->refcnt, 1);

	/* remember bpffs owning userns for future ns_capable() checks. */
	token->userns = userns;
	token->allowed_cmds = mnt_opts->delegate_cmds;
	token->allowed_maps = mnt_opts->delegate_maps;
	token->allowed_progs = mnt_opts->delegate_progs;
	token->allowed_attachs = mnt_opts->delegate_attachs;

	/* NOTE(review): on failure here @token is plain-kfreed without
	 * security_bpf_token_free(); assumes the create hook cleans up
	 * after itself on error — confirm against the LSM contract. */
	err = security_bpf_token_create(token, attr, &path);
	if (err)
		return err;

	/* userns ref is taken only once the token can no longer fail,
	 * so the __free(kfree) path never owns a userns reference */
	get_user_ns(token->userns);
	fd_prepare_file(fdf)->private_data = no_free_ptr(token);
	return fd_publish(fdf);
}
195 
196 int bpf_token_get_info_by_fd(struct bpf_token *token,
197 			     const union bpf_attr *attr,
198 			     union bpf_attr __user *uattr)
199 {
200 	struct bpf_token_info __user *uinfo = u64_to_user_ptr(attr->info.info);
201 	struct bpf_token_info info;
202 	u32 info_len = attr->info.info_len;
203 
204 	info_len = min_t(u32, info_len, sizeof(info));
205 	memset(&info, 0, sizeof(info));
206 
207 	info.allowed_cmds = token->allowed_cmds;
208 	info.allowed_maps = token->allowed_maps;
209 	info.allowed_progs = token->allowed_progs;
210 	info.allowed_attachs = token->allowed_attachs;
211 
212 	if (copy_to_user(uinfo, &info, info_len) ||
213 	    put_user(info_len, &uattr->info.info_len))
214 		return -EFAULT;
215 
216 	return 0;
217 }
218 
219 struct bpf_token *bpf_token_get_from_fd(u32 ufd)
220 {
221 	CLASS(fd, f)(ufd);
222 	struct bpf_token *token;
223 
224 	if (fd_empty(f))
225 		return ERR_PTR(-EBADF);
226 	if (fd_file(f)->f_op != &bpf_token_fops)
227 		return ERR_PTR(-EINVAL);
228 
229 	token = fd_file(f)->private_data;
230 	bpf_token_inc(token);
231 
232 	return token;
233 }
234 
235 bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
236 {
237 	if (!token)
238 		return false;
239 	if (!(token->allowed_cmds & BIT_ULL(cmd)))
240 		return false;
241 	return security_bpf_token_cmd(token, cmd) == 0;
242 }
243 
244 bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type)
245 {
246 	if (!token || type >= __MAX_BPF_MAP_TYPE)
247 		return false;
248 
249 	return token->allowed_maps & BIT_ULL(type);
250 }
251 
252 bool bpf_token_allow_prog_type(const struct bpf_token *token,
253 			       enum bpf_prog_type prog_type,
254 			       enum bpf_attach_type attach_type)
255 {
256 	if (!token || prog_type >= __MAX_BPF_PROG_TYPE || attach_type >= __MAX_BPF_ATTACH_TYPE)
257 		return false;
258 
259 	return (token->allowed_progs & BIT_ULL(prog_type)) &&
260 	       (token->allowed_attachs & BIT_ULL(attach_type));
261 }
262