xref: /linux/security/device_cgroup.c (revision 121cc35cfb55ab0bcf04c8ba6b364a0990eb2449)
// SPDX-License-Identifier: GPL-2.0
/*
 * device_cgroup.c - device cgroup subsystem
 *
 * Copyright 2007 IBM Corp
 */

#include <linux/bpf-cgroup.h>
#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>

#ifdef CONFIG_CGROUP_DEVICE

static DEFINE_MUTEX(devcgroup_mutex);

enum devcg_behavior {
	DEVCG_DEFAULT_NONE,
	DEVCG_DEFAULT_ALLOW,
	DEVCG_DEFAULT_DENY,
};

/*
 * exception list locking rules:
 * hold devcgroup_mutex for update/read.
 * hold rcu_read_lock() for read.
 */

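/*
 * One exception entry: @type is DEVCG_DEV_BLOCK and/or DEVCG_DEV_CHAR,
 * @major/@minor are the device numbers (~0 means "any"), and @access is a
 * bitmask of DEVCG_ACC_READ, DEVCG_ACC_WRITE and DEVCG_ACC_MKNOD.
 */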
struct dev_exception_item {
	u32 major, minor;
	short type;
	short access;
	struct list_head list;
	struct rcu_head rcu;
};

struct dev_cgroup {
	struct cgroup_subsys_state css;
	struct list_head exceptions;
	enum devcg_behavior behavior;
};

static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct dev_cgroup, css) : NULL;
}

static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
	return css_to_devcgroup(task_css(task, devices_cgrp_id));
}

/*
 * called under devcgroup_mutex
 */
static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
{
	struct dev_exception_item *ex, *tmp, *new;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry(ex, orig, list) {
		new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
		if (!new)
			goto free_and_exit;
		list_add_tail(&new->list, dest);
	}

	return 0;

free_and_exit:
	list_for_each_entry_safe(ex, tmp, dest, list) {
		list_del(&ex->list);
		kfree(ex);
	}
	return -ENOMEM;
}

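/*
 * called under devcgroup_mutex
 */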
static void dev_exceptions_move(struct list_head *dest, struct list_head *orig)
{
	struct dev_exception_item *ex, *tmp;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry_safe(ex, tmp, orig, list) {
		list_move_tail(&ex->list, dest);
	}
}

/*
 * called under devcgroup_mutex
 */
static int dev_exception_add(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *excopy, *walk;

	lockdep_assert_held(&devcgroup_mutex);

	excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
	if (!excopy)
		return -ENOMEM;

	list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access |= ex->access;
		kfree(excopy);
		excopy = NULL;
	}

	if (excopy != NULL)
		list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
	return 0;
}

/*
 * called under devcgroup_mutex
 */
static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *walk, *tmp;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access &= ~ex->access;
		if (!walk->access) {
			list_del_rcu(&walk->list);
			kfree_rcu(walk, rcu);
		}
	}
}

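/*
 * Helper shared by dev_exception_clean() (which asserts devcgroup_mutex)
 * and devcgroup_css_free(), which frees a css that is no longer reachable
 * and therefore calls it without taking the mutex.
 */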
static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	struct dev_exception_item *ex, *tmp;

	list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
		list_del_rcu(&ex->list);
		kfree_rcu(ex, rcu);
	}
}

/**
 * dev_exception_clean - frees all entries of the exception list
 * @dev_cgroup: dev_cgroup with the exception list to be cleaned
 *
 * called under devcgroup_mutex
 */
static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	lockdep_assert_held(&devcgroup_mutex);

	__dev_exception_clean(dev_cgroup);
}

static inline bool is_devcg_online(const struct dev_cgroup *devcg)
{
	return (devcg->behavior != DEVCG_DEFAULT_NONE);
}

/**
 * devcgroup_online - initializes devcgroup's behavior and exceptions based on
 *		      parent's
 * @css: css getting online
 * returns 0 in case of success, error code otherwise
 */
static int devcgroup_online(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
	struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css->parent);
	int ret = 0;

	mutex_lock(&devcgroup_mutex);

	if (parent_dev_cgroup == NULL)
		dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
	else {
		ret = dev_exceptions_copy(&dev_cgroup->exceptions,
					  &parent_dev_cgroup->exceptions);
		if (!ret)
			dev_cgroup->behavior = parent_dev_cgroup->behavior;
	}
	mutex_unlock(&devcgroup_mutex);

	return ret;
}

static void devcgroup_offline(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);

	mutex_lock(&devcgroup_mutex);
	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
	mutex_unlock(&devcgroup_mutex);
}

/*
 * called from kernel/cgroup/cgroup.c with cgroup_lock() held.
 */
static struct cgroup_subsys_state *
devcgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct dev_cgroup *dev_cgroup;

	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
	if (!dev_cgroup)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&dev_cgroup->exceptions);
	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;

	return &dev_cgroup->css;
}

static void devcgroup_css_free(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);

	__dev_exception_clean(dev_cgroup);
	kfree(dev_cgroup);
}

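/*
 * Values stored in cftype->private for the "allow", "deny" and "list"
 * control files; devcgroup_update_access() uses them to tell allow writes
 * from deny writes.
 */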
#define DEVCG_ALLOW 1
#define DEVCG_DENY 2
#define DEVCG_LIST 3

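/*
 * Helpers used by devcgroup_seq_show() to print one exception in the same
 * "<type> <major>:<minor> <access>" form that the allow/deny files accept.
 */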
static void seq_putaccess(struct seq_file *m, short access)
{
	if (access & DEVCG_ACC_READ)
		seq_putc(m, 'r');
	if (access & DEVCG_ACC_WRITE)
		seq_putc(m, 'w');
	if (access & DEVCG_ACC_MKNOD)
		seq_putc(m, 'm');
}

static void seq_puttype(struct seq_file *m, short type)
{
	if (type == DEVCG_DEV_ALL)
		seq_putc(m, 'a');
	else if (type == DEVCG_DEV_CHAR)
		seq_putc(m, 'c');
	else if (type == DEVCG_DEV_BLOCK)
		seq_putc(m, 'b');
	else
		seq_putc(m, 'X');
}

static void seq_putversion(struct seq_file *m, unsigned int version)
{
	if (version == ~0)
		seq_putc(m, '*');
	else
		seq_printf(m, "%u", version);
}

static int devcgroup_seq_show(struct seq_file *m, void *v)
{
	struct dev_cgroup *devcgroup = css_to_devcgroup(seq_css(m));
	struct dev_exception_item *ex;

	rcu_read_lock();
	/*
	 * To preserve compatibility:
	 * - Only show the "all devices" line when the default policy is to allow
	 * - List the exceptions in case the default policy is to deny
	 * This way, the file remains as a "whitelist of devices"
	 */
	if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		seq_puts(m, "a *:* rwm\n");
	} else {
		list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
			seq_puttype(m, ex->type);
			seq_putc(m, ' ');
			seq_putversion(m, ex->major);
			seq_putc(m, ':');
			seq_putversion(m, ex->minor);
			seq_putc(m, ' ');
			seq_putaccess(m, ex->access);
			seq_putc(m, '\n');
		}
	}
	rcu_read_unlock();

	return 0;
}

/**
 * match_exception	- iterates the exception list trying to find a complete match
 * @exceptions: list of exceptions
 * @type: device type (DEVCG_DEV_BLOCK or DEVCG_DEV_CHAR)
 * @major: device file major number, ~0 to match all
 * @minor: device file minor number, ~0 to match all
 * @access: permission mask (DEVCG_ACC_READ, DEVCG_ACC_WRITE, DEVCG_ACC_MKNOD)
 *
 * It is considered a complete match if an exception is found that will
 * contain the entire range of provided parameters.
 *
 * Return: true in case it matches an exception completely
 */
static bool match_exception(struct list_head *exceptions, short type,
			    u32 major, u32 minor, short access)
{
	struct dev_exception_item *ex;

	list_for_each_entry_rcu(ex, exceptions, list) {
		if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK))
			continue;
		if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR))
			continue;
		if (ex->major != ~0 && ex->major != major)
			continue;
		if (ex->minor != ~0 && ex->minor != minor)
			continue;
		/* provided access cannot have more than the exception rule */
		if (access & (~ex->access))
			continue;
		return true;
	}
	return false;
}

/**
 * match_exception_partial - iterates the exception list trying to find a partial match
 * @exceptions: list of exceptions
 * @type: device type (DEVCG_DEV_BLOCK or DEVCG_DEV_CHAR)
 * @major: device file major number, ~0 to match all
 * @minor: device file minor number, ~0 to match all
 * @access: permission mask (DEVCG_ACC_READ, DEVCG_ACC_WRITE, DEVCG_ACC_MKNOD)
 *
 * It is considered a partial match if an exception's range is found to
 * contain *any* of the devices specified by the provided parameters. This is
 * used to make sure no extra access is being granted that is forbidden by
 * any exception in the list.
 *
 * Return: true in case the provided range matches an exception at least
 * partially
 */
static bool match_exception_partial(struct list_head *exceptions, short type,
				    u32 major, u32 minor, short access)
{
	struct dev_exception_item *ex;

	list_for_each_entry_rcu(ex, exceptions, list,
				lockdep_is_held(&devcgroup_mutex)) {
		if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK))
			continue;
		if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR))
			continue;
		/*
		 * We must be sure that both the exception and the provided
		 * range aren't masking all devices
		 */
		if (ex->major != ~0 && major != ~0 && ex->major != major)
			continue;
		if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
			continue;
		/*
		 * In order to make sure the provided range isn't matching
		 * an exception, all its access bits shouldn't match the
		 * exception's access bits
		 */
		if (!(access & ex->access))
			continue;
		return true;
	}
	return false;
}

/**
 * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions
 * @dev_cgroup: dev cgroup to be tested against
 * @refex: new exception
 * @behavior: behavior of the exception's dev_cgroup
 *
 * This is used to make sure a child cgroup won't have more privileges
 * than its parent
 */
static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
			  struct dev_exception_item *refex,
			  enum devcg_behavior behavior)
{
	bool match = false;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
			 !lockdep_is_held(&devcgroup_mutex),
			 "device_cgroup:verify_new_ex called without proper synchronization");

	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		if (behavior == DEVCG_DEFAULT_ALLOW) {
			/*
			 * new exception in the child doesn't matter, only
			 * adding extra restrictions
			 */
			return true;
		} else {
			/*
			 * new exception in the child will add more devices
			 * that can be accessed, so it can't match any of
			 * parent's exceptions, even slightly
			 */
			match = match_exception_partial(&dev_cgroup->exceptions,
							refex->type,
							refex->major,
							refex->minor,
							refex->access);

			if (match)
				return false;
			return true;
		}
	} else {
		/*
		 * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore
		 * the new exception will add access to more devices and must
		 * be contained completely in a parent's exception to be
		 * allowed
		 */
		match = match_exception(&dev_cgroup->exceptions, refex->type,
					refex->major, refex->minor,
					refex->access);

		if (match)
			/* parent has an exception that matches the proposed */
			return true;
		else
			return false;
	}
	return false;
}

/*
 * parent_has_perm:
 * when adding a new allow rule to a device exception list, the rule
 * must be allowed in the parent device cgroup
 */
static int parent_has_perm(struct dev_cgroup *childcg,
			   struct dev_exception_item *ex)
{
	struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);

	if (!parent)
		return 1;
	return verify_new_ex(parent, ex, childcg->behavior);
}

/**
 * parent_allows_removal - verify if it's ok to remove an exception
 * @childcg: child cgroup from where the exception will be removed
 * @ex: exception being removed
 *
 * When removing an exception in cgroups with default ALLOW policy, it must
 * be checked if removing it will give the child cgroup more access than the
 * parent.
 *
 * Return: true if it's ok to remove exception, false otherwise
 */
static bool parent_allows_removal(struct dev_cgroup *childcg,
				  struct dev_exception_item *ex)
{
	struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);

	if (!parent)
		return true;

	/* It's always allowed to remove access to devices */
	if (childcg->behavior == DEVCG_DEFAULT_DENY)
		return true;

	/*
	 * Make sure you're not removing part of or a whole exception that
	 * exists in the parent cgroup
	 */
	return !match_exception_partial(&parent->exceptions, ex->type,
					ex->major, ex->minor, ex->access);
}

/**
 * may_allow_all - checks if it's possible to change the behavior to
 *		   allow based on parent's rules.
 * @parent: device cgroup's parent
 * returns: != 0 in case it's allowed, 0 otherwise
 */
static inline int may_allow_all(struct dev_cgroup *parent)
{
	if (!parent)
		return 1;
	return parent->behavior == DEVCG_DEFAULT_ALLOW;
}

/**
 * revalidate_active_exceptions - walks through the active exception list and
 *				  revalidates the exceptions based on parent's
 *				  behavior and exceptions. The exceptions that
 *				  are no longer valid will be removed.
 *				  Called with devcgroup_mutex held.
 * @devcg: cgroup whose exceptions will be checked
 *
 * This is one of the three key functions for hierarchy implementation.
 * This function is responsible for re-evaluating all the cgroup's active
 * exceptions due to a parent's exception change.
 * Refer to Documentation/admin-guide/cgroup-v1/devices.rst for more details.
 */
static void revalidate_active_exceptions(struct dev_cgroup *devcg)
{
	struct dev_exception_item *ex;
	struct list_head *this, *tmp;

	list_for_each_safe(this, tmp, &devcg->exceptions) {
		ex = container_of(this, struct dev_exception_item, list);
		if (!parent_has_perm(devcg, ex))
			dev_exception_rm(devcg, ex);
	}
}

/**
 * propagate_exception - propagates a new exception to the children
 * @devcg_root: device cgroup that added a new exception
 * @ex: new exception to be propagated
 *
 * returns: 0 in case of success, != 0 in case of error
 */
static int propagate_exception(struct dev_cgroup *devcg_root,
			       struct dev_exception_item *ex)
{
	struct cgroup_subsys_state *pos;
	int rc = 0;

	rcu_read_lock();

	css_for_each_descendant_pre(pos, &devcg_root->css) {
		struct dev_cgroup *devcg = css_to_devcgroup(pos);

		/*
		 * Because devcgroup_mutex is held, no devcg will become
		 * online or offline during the tree walk (see on/offline
		 * methods), and online ones are safe to access outside RCU
		 * read lock without bumping refcnt.
		 */
		if (pos == &devcg_root->css || !is_devcg_online(devcg))
			continue;

		rcu_read_unlock();

		/*
		 * in case both the root's and devcg's behavior are allow, a
		 * new restriction means adding to the exception list
		 */
		if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW &&
		    devcg->behavior == DEVCG_DEFAULT_ALLOW) {
			rc = dev_exception_add(devcg, ex);
			if (rc)
				return rc;
		} else {
			/*
			 * in the other possible cases:
			 * root's behavior: allow, devcg's: deny
			 * root's behavior: deny, devcg's: deny
			 * the exception will be removed
			 */
			dev_exception_rm(devcg, ex);
		}
		revalidate_active_exceptions(devcg);

		rcu_read_lock();
	}

	rcu_read_unlock();
	return rc;
}

/*
 * Modify the exception list using allow/deny rules.
 * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
 * so we can give a container CAP_MKNOD to let it create devices but not
 * modify the exception list.
 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
 * us to also grant CAP_SYS_ADMIN to containers without giving away the
 * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN.
 *
 * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
 * new access is only allowed if you're in the top-level cgroup, or your
 * parent cgroup has the access you're asking for.
 */
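/*
 * Each write is a single rule in one of the forms parsed below:
 *   "a"                            change the default behavior (allow/deny all)
 *   "<b|c> <major>:<minor> [rwm]"  add or remove one exception; major and
 *                                  minor may each be '*' to match any device
 * e.g. "c 1:3 rw" or "b *:* m".
 */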
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
				   int filetype, char *buffer)
{
	const char *b;
	char temp[12];		/* 11 + 1 characters needed for a u32 */
	int count, rc = 0;
	struct dev_exception_item ex;
	struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent);
	struct dev_cgroup tmp_devcgrp;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	memset(&ex, 0, sizeof(ex));
	memset(&tmp_devcgrp, 0, sizeof(tmp_devcgrp));
	b = buffer;

	switch (*b) {
	case 'a':
		switch (filetype) {
		case DEVCG_ALLOW:
			if (css_has_online_children(&devcgroup->css))
				return -EINVAL;

			if (!may_allow_all(parent))
				return -EPERM;
			if (!parent) {
				devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
				dev_exception_clean(devcgroup);
				break;
			}

			INIT_LIST_HEAD(&tmp_devcgrp.exceptions);
			rc = dev_exceptions_copy(&tmp_devcgrp.exceptions,
						 &devcgroup->exceptions);
			if (rc)
				return rc;
			dev_exception_clean(devcgroup);
			rc = dev_exceptions_copy(&devcgroup->exceptions,
						 &parent->exceptions);
			if (rc) {
				dev_exceptions_move(&devcgroup->exceptions,
						    &tmp_devcgrp.exceptions);
				return rc;
			}
			devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
			dev_exception_clean(&tmp_devcgrp);
			break;
		case DEVCG_DENY:
			if (css_has_online_children(&devcgroup->css))
				return -EINVAL;

			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_DENY;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	case 'b':
		ex.type = DEVCG_DEV_BLOCK;
		break;
	case 'c':
		ex.type = DEVCG_DEV_CHAR;
		break;
	default:
		return -EINVAL;
	}
	b++;
	if (!isspace(*b))
		return -EINVAL;
	b++;
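	/* read major */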
	if (*b == '*') {
		ex.major = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.major);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (*b != ':')
		return -EINVAL;
	b++;

	/* read minor */
	if (*b == '*') {
		ex.minor = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.minor);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (!isspace(*b))
		return -EINVAL;
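	/* read access: any combination of 'r', 'w' and 'm' */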
	for (b++, count = 0; count < 3; count++, b++) {
		switch (*b) {
		case 'r':
			ex.access |= DEVCG_ACC_READ;
			break;
		case 'w':
			ex.access |= DEVCG_ACC_WRITE;
			break;
		case 'm':
			ex.access |= DEVCG_ACC_MKNOD;
			break;
		case '\n':
		case '\0':
			count = 3;
			break;
		default:
			return -EINVAL;
		}
	}

	switch (filetype) {
	case DEVCG_ALLOW:
		/*
		 * If the default policy is to allow by default, try to remove
		 * a matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
			/* Check if the parent allows removing it first */
			if (!parent_allows_removal(devcgroup, &ex))
				return -EPERM;
			dev_exception_rm(devcgroup, &ex);
			break;
		}

		if (!parent_has_perm(devcgroup, &ex))
			return -EPERM;
		rc = dev_exception_add(devcgroup, &ex);
		break;
	case DEVCG_DENY:
		/*
		 * If the default policy is to deny by default, try to remove
		 * a matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_DENY)
			dev_exception_rm(devcgroup, &ex);
		else
			rc = dev_exception_add(devcgroup, &ex);

		if (rc)
			break;
		/* we only propagate new restrictions */
		rc = propagate_exception(devcgroup, &ex);
		break;
	default:
		rc = -EINVAL;
	}
	return rc;
}

static ssize_t devcgroup_access_write(struct kernfs_open_file *of,
				      char *buf, size_t nbytes, loff_t off)
{
	int retval;

	mutex_lock(&devcgroup_mutex);
	retval = devcgroup_update_access(css_to_devcgroup(of_css(of)),
					 of_cft(of)->private, strstrip(buf));
	mutex_unlock(&devcgroup_mutex);
	return retval ?: nbytes;
}

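/*
 * Control files exposed by the cgroup v1 "devices" controller, e.g.
 * (hierarchy mount point may vary):
 *   echo 'c 1:3 rwm' > devices.deny    deny a char device such as 1:3
 *   echo 'a' > devices.allow           revert to "allow all", if the parent
 *                                      permits it
 *   cat devices.list                   show the effective rules
 */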
static struct cftype dev_cgroup_files[] = {
	{
		.name = "allow",
		.write = devcgroup_access_write,
		.private = DEVCG_ALLOW,
	},
	{
		.name = "deny",
		.write = devcgroup_access_write,
		.private = DEVCG_DENY,
	},
	{
		.name = "list",
		.seq_show = devcgroup_seq_show,
		.private = DEVCG_LIST,
	},
	{ }	/* terminate */
};

struct cgroup_subsys devices_cgrp_subsys = {
	.css_alloc = devcgroup_css_alloc,
	.css_free = devcgroup_css_free,
	.css_online = devcgroup_online,
	.css_offline = devcgroup_offline,
	.legacy_cftypes = dev_cgroup_files,
};

/**
 * devcgroup_legacy_check_permission - checks if an inode operation is permitted
 * @type: device type
 * @major: device major number
 * @minor: device minor number
 * @access: combination of DEVCG_ACC_WRITE, DEVCG_ACC_READ and DEVCG_ACC_MKNOD
 *
 * returns 0 on success, -EPERM in case the operation is not permitted
 */
static int devcgroup_legacy_check_permission(short type, u32 major, u32 minor,
					short access)
{
	struct dev_cgroup *dev_cgroup;
	bool rc;

	rcu_read_lock();
	dev_cgroup = task_devcgroup(current);
	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
		/* Can't match any of the exceptions, even partially */
		rc = !match_exception_partial(&dev_cgroup->exceptions,
					      type, major, minor, access);
	else
		/* Need to match completely one exception to be allowed */
		rc = match_exception(&dev_cgroup->exceptions, type, major,
				     minor, access);
	rcu_read_unlock();

	if (!rc)
		return -EPERM;

	return 0;
}

#endif /* CONFIG_CGROUP_DEVICE */

#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)

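/*
 * Entry point exported to the rest of the kernel: consult any attached BPF
 * device programs first and, if they allow the access and the legacy
 * controller is built in, fall back to the cgroup v1 exception list.
 */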
int devcgroup_check_permission(short type, u32 major, u32 minor, short access)
{
	int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);

	if (rc)
		return rc;

	#ifdef CONFIG_CGROUP_DEVICE
	return devcgroup_legacy_check_permission(type, major, minor, access);

	#else /* CONFIG_CGROUP_DEVICE */
	return 0;

	#endif /* CONFIG_CGROUP_DEVICE */
}
EXPORT_SYMBOL(devcgroup_check_permission);
#endif /* defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) */