#include <linux/bpf.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/namei.h>
#include <linux/user_namespace.h>
#include <linux/security.h>

static bool bpf_ns_capable(struct user_namespace *ns, int cap)
{
	return ns_capable(ns, cap) || (cap != CAP_SYS_ADMIN && ns_capable(ns, CAP_SYS_ADMIN));
}

bool bpf_token_capable(const struct bpf_token *token, int cap)
{
	struct user_namespace *userns;

	/* BPF token allows ns_capable() level of capabilities */
	userns = token ? token->userns : &init_user_ns;
	if (!bpf_ns_capable(userns, cap))
		return false;
	if (token && security_bpf_token_capable(token, cap) < 0)
		return false;
	return true;
}

void bpf_token_inc(struct bpf_token *token)
{
	atomic64_inc(&token->refcnt);
}

static void bpf_token_free(struct bpf_token *token)
{
	security_bpf_token_free(token);
	put_user_ns(token->userns);
	kfree(token);
}

static void bpf_token_put_deferred(struct work_struct *work)
{
	struct bpf_token *token = container_of(work, struct bpf_token, work);

	bpf_token_free(token);
}

void bpf_token_put(struct bpf_token *token)
{
	if (!token)
		return;

	if (!atomic64_dec_and_test(&token->refcnt))
		return;

	INIT_WORK(&token->work, bpf_token_put_deferred);
	schedule_work(&token->work);
}

static int bpf_token_release(struct inode *inode, struct file *filp)
{
	struct bpf_token *token = filp->private_data;

	bpf_token_put(token);
	return 0;
}

static void bpf_token_show_fdinfo(struct seq_file *m, struct file *filp)
{
	struct bpf_token *token = filp->private_data;
	u64 mask;

	BUILD_BUG_ON(__MAX_BPF_CMD >= 64);
	mask = BIT_ULL(__MAX_BPF_CMD) - 1;
	if ((token->allowed_cmds & mask) == mask)
		seq_printf(m, "allowed_cmds:\tany\n");
	else
		seq_printf(m, "allowed_cmds:\t0x%llx\n", token->allowed_cmds);

	BUILD_BUG_ON(__MAX_BPF_MAP_TYPE >= 64);
	mask = BIT_ULL(__MAX_BPF_MAP_TYPE) - 1;
	if ((token->allowed_maps & mask) == mask)
		seq_printf(m, "allowed_maps:\tany\n");
	else
		seq_printf(m, "allowed_maps:\t0x%llx\n", token->allowed_maps);

	BUILD_BUG_ON(__MAX_BPF_PROG_TYPE >= 64);
	mask = BIT_ULL(__MAX_BPF_PROG_TYPE) - 1;
	if ((token->allowed_progs & mask) == mask)
		seq_printf(m, "allowed_progs:\tany\n");
	else
		seq_printf(m, "allowed_progs:\t0x%llx\n", token->allowed_progs);

	BUILD_BUG_ON(__MAX_BPF_ATTACH_TYPE >= 64);
	mask = BIT_ULL(__MAX_BPF_ATTACH_TYPE) - 1;
	if ((token->allowed_attachs & mask) == mask)
		seq_printf(m, "allowed_attachs:\tany\n");
	else
		seq_printf(m, "allowed_attachs:\t0x%llx\n", token->allowed_attachs);
}

#define BPF_TOKEN_INODE_NAME "bpf-token"

static const struct inode_operations bpf_token_iops = { };

static const struct file_operations bpf_token_fops = {
	.release	= bpf_token_release,
	.show_fdinfo	= bpf_token_show_fdinfo,
};

int bpf_token_create(union bpf_attr *attr)
{
	struct bpf_mount_opts *mnt_opts;
	struct bpf_token *token = NULL;
	struct user_namespace *userns;
	struct inode *inode;
	struct file *file;
	CLASS(fd, f)(attr->token_create.bpffs_fd);
	struct path path;
	struct super_block *sb;
	umode_t mode;
	int err, fd;

	if (fd_empty(f))
		return -EBADF;

	path = fd_file(f)->f_path;
	sb = path.dentry->d_sb;

	if (path.dentry != sb->s_root)
		return -EINVAL;
	if (sb->s_op != &bpf_super_ops)
		return -EINVAL;
	err = path_permission(&path, MAY_ACCESS);
	if (err)
		return err;

	userns = sb->s_user_ns;
	/*
	 * Enforce that creators of BPF tokens are in the same user
	 * namespace as the BPF FS instance. This makes reasoning about
	 * permissions a lot easier and we can always relax this later.
	 */
	if (current_user_ns() != userns)
		return -EPERM;
	if (!ns_capable(userns, CAP_BPF))
		return -EPERM;

	/* Creating BPF token in init_user_ns doesn't make much sense. */
	if (current_user_ns() == &init_user_ns)
		return -EOPNOTSUPP;

	mnt_opts = sb->s_fs_info;
	if (mnt_opts->delegate_cmds == 0 &&
	    mnt_opts->delegate_maps == 0 &&
	    mnt_opts->delegate_progs == 0 &&
	    mnt_opts->delegate_attachs == 0)
		return -ENOENT; /* no BPF token delegation is set up */

	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
	inode = bpf_get_inode(sb, NULL, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &bpf_token_iops;
	inode->i_fop = &bpf_token_fops;
	clear_nlink(inode); /* make sure it is unlinked */

	file = alloc_file_pseudo(inode, path.mnt, BPF_TOKEN_INODE_NAME, O_RDWR, &bpf_token_fops);
	if (IS_ERR(file)) {
		iput(inode);
		return PTR_ERR(file);
	}

	token = kzalloc(sizeof(*token), GFP_USER);
	if (!token) {
		err = -ENOMEM;
		goto out_file;
	}

	atomic64_set(&token->refcnt, 1);

	/* remember bpffs owning userns for future ns_capable() checks */
	token->userns = get_user_ns(userns);

	token->allowed_cmds = mnt_opts->delegate_cmds;
	token->allowed_maps = mnt_opts->delegate_maps;
	token->allowed_progs = mnt_opts->delegate_progs;
	token->allowed_attachs = mnt_opts->delegate_attachs;

	err = security_bpf_token_create(token, attr, &path);
	if (err)
		goto out_token;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		err = fd;
		goto out_token;
	}

	file->private_data = token;
	fd_install(fd, file);

	return fd;

out_token:
	bpf_token_free(token);
out_file:
	fput(file);
	return err;
}

struct bpf_token *bpf_token_get_from_fd(u32 ufd)
{
	CLASS(fd, f)(ufd);
	struct bpf_token *token;

	if (fd_empty(f))
		return ERR_PTR(-EBADF);
	if (fd_file(f)->f_op != &bpf_token_fops)
		return ERR_PTR(-EINVAL);

	token = fd_file(f)->private_data;
	bpf_token_inc(token);

	return token;
}

bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
{
	if (!token)
		return false;
	if (!(token->allowed_cmds & BIT_ULL(cmd)))
		return false;
	return security_bpf_token_cmd(token, cmd) == 0;
}

bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type)
{
	if (!token || type >= __MAX_BPF_MAP_TYPE)
		return false;

	return token->allowed_maps & BIT_ULL(type);
}

bool bpf_token_allow_prog_type(const struct bpf_token *token,
			       enum bpf_prog_type prog_type,
			       enum bpf_attach_type attach_type)
{
	if (!token || prog_type >= __MAX_BPF_PROG_TYPE || attach_type >= __MAX_BPF_ATTACH_TYPE)
		return false;

	return (token->allowed_progs & BIT_ULL(prog_type)) &&
	       (token->allowed_attachs & BIT_ULL(attach_type));
}
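
/*
 * Illustrative userspace sketch (not part of this kernel file): one way a
 * process inside a user namespace could request a BPF token from a
 * delegation-enabled bpffs mount via the raw bpf() syscall, matching the
 * checks bpf_token_create() performs above. This assumes a UAPI <linux/bpf.h>
 * that already defines BPF_TOKEN_CREATE and the token_create attr fields;
 * the helper name and bpffs path are made up for the example.
 *
 *	#include <linux/bpf.h>
 *	#include <sys/syscall.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int create_bpf_token(const char *bpffs_path)
 *	{
 *		union bpf_attr attr;
 *		int bpffs_fd, token_fd;
 *
 *		// the FD must refer to the root of a bpffs instance mounted
 *		// with delegate_cmds/delegate_maps/... options, otherwise the
 *		// kernel rejects the request (-EINVAL / -ENOENT above)
 *		bpffs_fd = open(bpffs_path, O_DIRECTORY | O_RDONLY);
 *		if (bpffs_fd < 0)
 *			return -1;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.token_create.bpffs_fd = bpffs_fd;
 *
 *		// on success the kernel installs and returns an O_CLOEXEC
 *		// token FD; on failure errno is set by the syscall
 *		token_fd = syscall(SYS_bpf, BPF_TOKEN_CREATE, &attr, sizeof(attr));
 *		close(bpffs_fd);
 *		return token_fd;
 *	}
 *
 * The caller is expected to be in the same (non-init) user namespace as the
 * bpffs instance and to hold CAP_BPF there, per the -EPERM/-EOPNOTSUPP
 * checks in bpf_token_create().
 */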