xref: /linux/fs/cachefiles/daemon.c (revision 1623bc27a85a93e82194c8d077eccc464efa67db)
// SPDX-License-Identifier: GPL-2.0-or-later
/* Daemon interface
 *
 * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/statfs.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/fs_struct.h>
#include "internal.h"

static int cachefiles_daemon_open(struct inode *, struct file *);
static int cachefiles_daemon_release(struct inode *, struct file *);
static ssize_t cachefiles_daemon_read(struct file *, char __user *, size_t,
				      loff_t *);
static ssize_t cachefiles_daemon_write(struct file *, const char __user *,
				       size_t, loff_t *);
static __poll_t cachefiles_daemon_poll(struct file *,
					   struct poll_table_struct *);
static int cachefiles_daemon_frun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fcull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fstop(struct cachefiles_cache *, char *);
static int cachefiles_daemon_brun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bcull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bstop(struct cachefiles_cache *, char *);
static int cachefiles_daemon_cull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_debug(struct cachefiles_cache *, char *);
static int cachefiles_daemon_dir(struct cachefiles_cache *, char *);
static int cachefiles_daemon_inuse(struct cachefiles_cache *, char *);
static int cachefiles_daemon_secctx(struct cachefiles_cache *, char *);
static int cachefiles_daemon_tag(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bind(struct cachefiles_cache *, char *);
static void cachefiles_daemon_unbind(struct cachefiles_cache *);

static unsigned long cachefiles_open;

const struct file_operations cachefiles_daemon_fops = {
	.owner		= THIS_MODULE,
	.open		= cachefiles_daemon_open,
	.release	= cachefiles_daemon_release,
	.read		= cachefiles_daemon_read,
	.write		= cachefiles_daemon_write,
	.poll		= cachefiles_daemon_poll,
	.llseek		= noop_llseek,
};

struct cachefiles_daemon_cmd {
	char name[8];
	int (*handler)(struct cachefiles_cache *cache, char *args);
};

static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = {
	{ "bind",	cachefiles_daemon_bind		},
	{ "brun",	cachefiles_daemon_brun		},
	{ "bcull",	cachefiles_daemon_bcull		},
	{ "bstop",	cachefiles_daemon_bstop		},
	{ "cull",	cachefiles_daemon_cull		},
	{ "debug",	cachefiles_daemon_debug		},
	{ "dir",	cachefiles_daemon_dir		},
	{ "frun",	cachefiles_daemon_frun		},
	{ "fcull",	cachefiles_daemon_fcull		},
	{ "fstop",	cachefiles_daemon_fstop		},
	{ "inuse",	cachefiles_daemon_inuse		},
	{ "secctx",	cachefiles_daemon_secctx	},
	{ "tag",	cachefiles_daemon_tag		},
#ifdef CONFIG_CACHEFILES_ONDEMAND
	{ "copen",	cachefiles_ondemand_copen	},
	{ "restore",	cachefiles_ondemand_restore	},
#endif
	{ "",		NULL				}
};
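
/*
 * Purely illustrative sketch of how a userspace cache manager (typically
 * cachefilesd) might drive the command table above.  The device node name,
 * cache directory, tag and percentages below are example values, not defined
 * in this file; the handlers further down define the actual syntax.  Error
 * checking is omitted for brevity.
 *
 *	static const char *cmds[] = {
 *		"dir /var/cache/fscache", "tag mycache",
 *		"brun 10%", "bcull 7%", "bstop 3%", "bind",
 *	};
 *	int i, fd = open("/dev/cachefiles", O_RDWR);
 *	for (i = 0; i < 6; i++)
 *		write(fd, cmds[i], strlen(cmds[i]));
 *
 * Each command must be issued as a single write(); "bind" comes last, once
 * the directory and limits have been configured.
 */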


/*
 * Prepare a cache for caching.
 */
static int cachefiles_daemon_open(struct inode *inode, struct file *file)
{
	struct cachefiles_cache *cache;

	_enter("");

	/* only the superuser may do this */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* the cachefiles device may only be opened once at a time */
	if (xchg(&cachefiles_open, 1) == 1)
		return -EBUSY;

	/* allocate a cache record */
	cache = kzalloc(sizeof(struct cachefiles_cache), GFP_KERNEL);
	if (!cache) {
		cachefiles_open = 0;
		return -ENOMEM;
	}

	mutex_init(&cache->daemon_mutex);
	init_waitqueue_head(&cache->daemon_pollwq);
	INIT_LIST_HEAD(&cache->volumes);
	INIT_LIST_HEAD(&cache->object_list);
	spin_lock_init(&cache->object_list_lock);
	refcount_set(&cache->unbind_pincount, 1);
	xa_init_flags(&cache->reqs, XA_FLAGS_ALLOC);
	xa_init_flags(&cache->ondemand_ids, XA_FLAGS_ALLOC1);

	/* set default caching limits
	 * - stop allocating at 1% free space and/or free files
	 * - cull below 5% free space and/or free files
	 * - cease culling above 7% free space and/or free files
	 */
	cache->frun_percent = 7;
	cache->fcull_percent = 5;
	cache->fstop_percent = 1;
	cache->brun_percent = 7;
	cache->bcull_percent = 5;
	cache->bstop_percent = 1;

	file->private_data = cache;
	cache->cachefilesd = file;
	return 0;
}
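
/*
 * Rough worked example of the defaults above, assuming the absolute limits
 * are derived from these percentages and a statfs of the backing filesystem
 * when the cache is bound: on a backing filesystem of 1,000,000 blocks,
 * culling would start when fewer than about 50,000 blocks (5%) are free,
 * cease again above about 70,000 blocks (7%), and new cache allocation would
 * be refused below about 10,000 free blocks (1%).  The same ratios apply to
 * free file slots.
 */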

void cachefiles_flush_reqs(struct cachefiles_cache *cache)
{
	struct xarray *xa = &cache->reqs;
	struct cachefiles_req *req;
	unsigned long index;

	/*
	 * Make sure the following two operations won't be reordered.
	 *   1) set CACHEFILES_DEAD bit
	 *   2) flush requests in the xarray
	 * Otherwise a request may be enqueued after the xarray has been
	 * flushed, leaving an orphaned request that is never completed.
	 *
	 * CPU 1			CPU 2
	 * =====			=====
	 * flush requests in the xarray
	 *				test CACHEFILES_DEAD bit
	 *				enqueue the request
	 * set CACHEFILES_DEAD bit
	 */
	smp_mb();

	xa_lock(xa);
	xa_for_each(xa, index, req) {
		req->error = -EIO;
		complete(&req->done);
		__xa_erase(xa, index);
	}
	xa_unlock(xa);

	xa_destroy(&cache->reqs);
	xa_destroy(&cache->ondemand_ids);
}

void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache)
{
	if (refcount_dec_and_test(&cache->unbind_pincount)) {
		cachefiles_daemon_unbind(cache);
		cachefiles_open = 0;
		kfree(cache);
	}
}

void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache)
{
	refcount_inc(&cache->unbind_pincount);
}

/*
 * Release a cache.
 */
static int cachefiles_daemon_release(struct inode *inode, struct file *file)
{
	struct cachefiles_cache *cache = file->private_data;

	_enter("");

	ASSERT(cache);

	set_bit(CACHEFILES_DEAD, &cache->flags);

	if (cachefiles_in_ondemand_mode(cache))
		cachefiles_flush_reqs(cache);

	/* clean up the control file interface */
	cache->cachefilesd = NULL;
	file->private_data = NULL;

	cachefiles_put_unbind_pincount(cache);

	_leave("");
	return 0;
}

static ssize_t cachefiles_do_daemon_read(struct cachefiles_cache *cache,
					 char __user *_buffer, size_t buflen)
{
	unsigned long long b_released;
	unsigned f_released;
	char buffer[256];
	int n;

	/* check how much space the cache has */
	cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check);

	/* summarise */
	f_released = atomic_xchg(&cache->f_released, 0);
	b_released = atomic_long_xchg(&cache->b_released, 0);
	clear_bit(CACHEFILES_STATE_CHANGED, &cache->flags);

	n = snprintf(buffer, sizeof(buffer),
		     "cull=%c"
		     " frun=%llx"
		     " fcull=%llx"
		     " fstop=%llx"
		     " brun=%llx"
		     " bcull=%llx"
		     " bstop=%llx"
		     " freleased=%x"
		     " breleased=%llx",
		     test_bit(CACHEFILES_CULLING, &cache->flags) ? '1' : '0',
		     (unsigned long long) cache->frun,
		     (unsigned long long) cache->fcull,
		     (unsigned long long) cache->fstop,
		     (unsigned long long) cache->brun,
		     (unsigned long long) cache->bcull,
		     (unsigned long long) cache->bstop,
		     f_released,
		     b_released);

	if (n > buflen)
		return -EMSGSIZE;

	if (copy_to_user(_buffer, buffer, n) != 0)
		return -EFAULT;

	return n;
}
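
/*
 * The daemon reads this state back as a single line; all numeric fields
 * except the cull flag are hex, as formatted above.  A purely illustrative
 * example (values invented):
 *
 *	cull=0 frun=1117 fcull=c35 fstop=271 brun=1a2e6 bcull=12b9c bstop=3c8c freleased=0 breleased=0
 */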

/*
 * Read the cache state.
 */
static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
				      size_t buflen, loff_t *pos)
{
	struct cachefiles_cache *cache = file->private_data;

	//_enter(",,%zu,", buflen);

	if (!test_bit(CACHEFILES_READY, &cache->flags))
		return 0;

	if (cachefiles_in_ondemand_mode(cache))
		return cachefiles_ondemand_daemon_read(cache, _buffer, buflen);
	else
		return cachefiles_do_daemon_read(cache, _buffer, buflen);
}

/*
 * Take a command from cachefilesd, parse it and act on it.
 */
static ssize_t cachefiles_daemon_write(struct file *file,
				       const char __user *_data,
				       size_t datalen,
				       loff_t *pos)
{
	const struct cachefiles_daemon_cmd *cmd;
	struct cachefiles_cache *cache = file->private_data;
	ssize_t ret;
	char *data, *args, *cp;

	//_enter(",,%zu,", datalen);

	ASSERT(cache);

	if (test_bit(CACHEFILES_DEAD, &cache->flags))
		return -EIO;

	if (datalen > PAGE_SIZE - 1)
		return -EOPNOTSUPP;

	/* drag the command string into the kernel so we can parse it */
	data = memdup_user_nul(_data, datalen);
	if (IS_ERR(data))
		return PTR_ERR(data);

	ret = -EINVAL;
	if (memchr(data, '\0', datalen))
		goto error;

	/* strip any newline */
	cp = memchr(data, '\n', datalen);
	if (cp) {
		if (cp == data)
			goto error;

		*cp = '\0';
	}

	/* parse the command */
	ret = -EOPNOTSUPP;

	for (args = data; *args; args++)
		if (isspace(*args))
			break;
	if (*args) {
		if (args == data)
			goto error;
		*args = '\0';
		args = skip_spaces(++args);
	}

	/* run the appropriate command handler */
	for (cmd = cachefiles_daemon_cmds; cmd->name[0]; cmd++)
		if (strcmp(cmd->name, data) == 0)
			goto found_command;

error:
	kfree(data);
	//_leave(" = %zd", ret);
	return ret;

found_command:
	mutex_lock(&cache->daemon_mutex);

	ret = -EIO;
	if (!test_bit(CACHEFILES_DEAD, &cache->flags))
		ret = cmd->handler(cache, args);

	mutex_unlock(&cache->daemon_mutex);

	if (ret == 0)
		ret = datalen;
	goto error;
}
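
/*
 * Worked example of the parsing above: a write of "frun 10%\n" is copied in
 * with a terminating NUL, the trailing newline is overwritten with a NUL, the
 * first space is overwritten with a NUL so that data points at "frun", and
 * args is advanced past any further spaces to point at "10%", which is then
 * handed to cachefiles_daemon_frun().  If the handler returns 0, the whole
 * write length is returned to the daemon.
 */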

/*
 * Poll for culling state
 * - use EPOLLIN to indicate a state change or a pending on-demand request
 * - use EPOLLOUT to indicate culling state
 */
static __poll_t cachefiles_daemon_poll(struct file *file,
					   struct poll_table_struct *poll)
{
	struct cachefiles_cache *cache = file->private_data;
	XA_STATE(xas, &cache->reqs, 0);
	struct cachefiles_req *req;
	__poll_t mask;

	poll_wait(file, &cache->daemon_pollwq, poll);
	mask = 0;

	if (cachefiles_in_ondemand_mode(cache)) {
		if (!xa_empty(&cache->reqs)) {
			xas_lock(&xas);
			xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
				if (!cachefiles_ondemand_is_reopening_read(req)) {
					mask |= EPOLLIN;
					break;
				}
			}
			xas_unlock(&xas);
		}
	} else {
		if (test_bit(CACHEFILES_CULLING, &cache->flags))
			mask |= EPOLLOUT;

		if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
			mask |= EPOLLIN;
	}

	if (test_bit(CACHEFILES_CULLING, &cache->flags))
		mask |= EPOLLOUT;

	return mask;
}
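
/*
 * A daemon would typically sleep in poll() on the control file: EPOLLIN means
 * there is something to read() (a state summary, or an on-demand request when
 * in ondemand mode), while EPOLLOUT means the cache has crossed a cull
 * threshold and the daemon should start selecting objects to cull.
 */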

/*
 * Give a range error for cache space constraints
 * - can be tail-called
 */
static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
					 char *args)
{
	pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n");

	return -EINVAL;
}

/*
 * Set the percentage of files at which to stop culling
 * - command: "frun <N>%"
 */
static int cachefiles_daemon_frun(struct cachefiles_cache *cache, char *args)
{
	unsigned long frun;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	frun = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (frun <= cache->fcull_percent || frun >= 100)
		return cachefiles_daemon_range_error(cache, args);

	cache->frun_percent = frun;
	return 0;
}

/*
 * Set the percentage of files at which to start culling
 * - command: "fcull <N>%"
 */
static int cachefiles_daemon_fcull(struct cachefiles_cache *cache, char *args)
{
	unsigned long fcull;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	fcull = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (fcull <= cache->fstop_percent || fcull >= cache->frun_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->fcull_percent = fcull;
	return 0;
}

/*
 * Set the percentage of files at which to stop allocating
 * - command: "fstop <N>%"
 */
static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
{
	unsigned long fstop;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	fstop = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (fstop >= cache->fcull_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->fstop_percent = fstop;
	return 0;
}

/*
 * Set the percentage of blocks at which to stop culling
 * - command: "brun <N>%"
 */
static int cachefiles_daemon_brun(struct cachefiles_cache *cache, char *args)
{
	unsigned long brun;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	brun = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (brun <= cache->bcull_percent || brun >= 100)
		return cachefiles_daemon_range_error(cache, args);

	cache->brun_percent = brun;
	return 0;
}

/*
 * Set the percentage of blocks at which to start culling
 * - command: "bcull <N>%"
 */
static int cachefiles_daemon_bcull(struct cachefiles_cache *cache, char *args)
{
	unsigned long bcull;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	bcull = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (bcull <= cache->bstop_percent || bcull >= cache->brun_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->bcull_percent = bcull;
	return 0;
}

/*
 * Set the percentage of blocks at which to stop allocating
 * - command: "bstop <N>%"
 */
static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
{
	unsigned long bstop;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	bstop = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (bstop >= cache->bcull_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->bstop_percent = bstop;
	return 0;
}
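
/*
 * Taken together, the six commands above must leave the limits ordered as
 * 0 <= stop < cull < run < 100 for both files and blocks (this is re-checked
 * at "bind" time).  Each command is validated against the current values of
 * its neighbours, so the order of issue matters: starting from the 7/5/1
 * defaults, "brun 10%", then "bcull 7%", then "bstop 3%" succeeds, whereas
 * issuing "bcull 10%" first would be rejected because it is not below the
 * current brun of 7%.
 */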

/*
 * Set the cache directory
 * - command: "dir <name>"
 */
static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
{
	char *dir;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty directory specified\n");
		return -EINVAL;
	}

	if (cache->rootdirname) {
		pr_err("Second cache directory specified\n");
		return -EEXIST;
	}

	dir = kstrdup(args, GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	cache->rootdirname = dir;
	return 0;
}

/*
 * Set the cache security context
 * - command: "secctx <ctx>"
 */
static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
{
	int err;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty security context specified\n");
		return -EINVAL;
	}

	if (cache->have_secid) {
		pr_err("Second security context specified\n");
		return -EINVAL;
	}

	err = security_secctx_to_secid(args, strlen(args), &cache->secid);
	if (err)
		return err;

	cache->have_secid = true;
	return 0;
}

/*
 * Set the cache tag
 * - command: "tag <name>"
 */
static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
{
	char *tag;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty tag specified\n");
		return -EINVAL;
	}

	if (cache->tag)
		return -EEXIST;

	tag = kstrdup(args, GFP_KERNEL);
	if (!tag)
		return -ENOMEM;

	cache->tag = tag;
	return 0;
}

/*
 * Request that a node in the cache be culled from the current working
 * directory
 * - command: "cull <name>"
 */
static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
{
	struct path path;
	const struct cred *saved_cred;
	int ret;

	_enter(",%s", args);

	if (strchr(args, '/'))
		goto inval;

	if (!test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("cull applied to unready cache\n");
		return -EIO;
	}

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		pr_err("cull applied to dead cache\n");
		return -EIO;
	}

	get_fs_pwd(current->fs, &path);

	if (!d_can_lookup(path.dentry))
		goto notdir;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = cachefiles_cull(cache, path.dentry, args);
	cachefiles_end_secure(cache, saved_cred);

	path_put(&path);
	_leave(" = %d", ret);
	return ret;

notdir:
	path_put(&path);
	pr_err("cull command requires dirfd to be a directory\n");
	return -ENOTDIR;

inval:
	pr_err("cull command requires dirfd and filename\n");
	return -EINVAL;
}
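
/*
 * Note for daemon writers: because the name is resolved relative to the
 * calling process's current working directory (get_fs_pwd() above), the
 * daemon is expected to chdir()/fchdir() into the directory holding the
 * candidate object before writing "cull <name>"; the name itself may not
 * contain a slash.
 */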

/*
 * Set debugging mode
 * - command: "debug <mask>"
 */
static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args)
{
	unsigned long mask;

	_enter(",%s", args);

	mask = simple_strtoul(args, &args, 0);
	if (args[0] != '\0')
		goto inval;

	cachefiles_debug = mask;
	_leave(" = 0");
	return 0;

inval:
	pr_err("debug command requires mask\n");
	return -EINVAL;
}

/*
 * Find out whether an object in the current working directory is in use or not
 * - command: "inuse <name>"
 */
static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
{
	struct path path;
	const struct cred *saved_cred;
	int ret;

	//_enter(",%s", args);

	if (strchr(args, '/'))
		goto inval;

	if (!test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("inuse applied to unready cache\n");
		return -EIO;
	}

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		pr_err("inuse applied to dead cache\n");
		return -EIO;
	}

	get_fs_pwd(current->fs, &path);

	if (!d_can_lookup(path.dentry))
		goto notdir;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = cachefiles_check_in_use(cache, path.dentry, args);
	cachefiles_end_secure(cache, saved_cred);

	path_put(&path);
	//_leave(" = %d", ret);
	return ret;

notdir:
	path_put(&path);
	pr_err("inuse command requires dirfd to be a directory\n");
	return -ENOTDIR;

inval:
	pr_err("inuse command requires dirfd and filename\n");
	return -EINVAL;
}
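
/*
 * As with "cull", the name is looked up relative to the daemon's current
 * working directory.  A write() that returns the full length indicates the
 * object is not in use; an in-use object is expected to be reported back as
 * an error from cachefiles_check_in_use() (typically -EBUSY), so the daemon
 * can skip it when choosing cull candidates.
 */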

/*
 * Bind a directory as a cache
 */
static int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
{
	_enter("{%u,%u,%u,%u,%u,%u},%s",
	       cache->frun_percent,
	       cache->fcull_percent,
	       cache->fstop_percent,
	       cache->brun_percent,
	       cache->bcull_percent,
	       cache->bstop_percent,
	       args);

	if (cache->fstop_percent >= cache->fcull_percent ||
	    cache->fcull_percent >= cache->frun_percent ||
	    cache->frun_percent  >= 100)
		return -ERANGE;

	if (cache->bstop_percent >= cache->bcull_percent ||
	    cache->bcull_percent >= cache->brun_percent ||
	    cache->brun_percent  >= 100)
		return -ERANGE;

	if (!cache->rootdirname) {
		pr_err("No cache directory specified\n");
		return -EINVAL;
	}

	/* Don't permit already bound caches to be re-bound */
	if (test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("Cache already bound\n");
		return -EBUSY;
	}

	if (IS_ENABLED(CONFIG_CACHEFILES_ONDEMAND)) {
		if (!strcmp(args, "ondemand")) {
			set_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags);
		} else if (*args) {
			pr_err("Invalid argument to the 'bind' command\n");
			return -EINVAL;
		}
	} else if (*args) {
		pr_err("'bind' command doesn't take an argument\n");
		return -EINVAL;
	}

	/* Make sure we have a copy of the tag string */
	if (!cache->tag) {
		/*
		 * The tag string is released by the fops->release()
		 * function, so we don't release it on error here
		 */
		cache->tag = kstrdup("CacheFiles", GFP_KERNEL);
		if (!cache->tag)
			return -ENOMEM;
	}

	return cachefiles_add_cache(cache);
}
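
/*
 * "bind" is the final step of cache bring-up: the daemon issues it with no
 * argument for the classic cachefilesd mode, or as "bind ondemand" (only
 * available with CONFIG_CACHEFILES_ONDEMAND) to switch the read path over to
 * the on-demand request protocol before cachefiles_add_cache() brings the
 * cache online.
 */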

/*
 * Unbind a cache.
 */
static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
{
	_enter("");

	if (test_bit(CACHEFILES_READY, &cache->flags))
		cachefiles_withdraw_cache(cache);

	cachefiles_put_directory(cache->graveyard);
	cachefiles_put_directory(cache->store);
	mntput(cache->mnt);
	put_cred(cache->cache_cred);

	kfree(cache->rootdirname);
	kfree(cache->tag);

	_leave("");
}