xref: /linux/fs/cachefiles/ondemand.c (revision c4101e55974cc7d835fbd2d8e01553a3f61e9e75)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/fdtable.h>
#include <linux/anon_inodes.h>
#include <linux/uio.h>
#include "internal.h"

static int cachefiles_ondemand_fd_release(struct inode *inode,
					  struct file *file)
{
	struct cachefiles_object *object = file->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_ondemand_info *info = object->ondemand;
	int object_id = info->ondemand_id;
	struct cachefiles_req *req;
	XA_STATE(xas, &cache->reqs, 0);

	xa_lock(&cache->reqs);
	info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
	cachefiles_ondemand_set_object_close(object);

	/* Only flush requests marked CACHEFILES_REQ_NEW, to avoid racing with daemon_read */
	xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
		if (req->msg.object_id == object_id &&
		    req->msg.opcode == CACHEFILES_OP_CLOSE) {
			complete(&req->done);
			xas_store(&xas, NULL);
		}
	}
	xa_unlock(&cache->reqs);

	xa_erase(&cache->ondemand_ids, object_id);
	trace_cachefiles_ondemand_fd_release(object, object_id);
	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
	cachefiles_put_unbind_pincount(cache);
	return 0;
}

static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
						 struct iov_iter *iter)
{
	struct cachefiles_object *object = kiocb->ki_filp->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct file *file = object->file;
	size_t len = iter->count;
	loff_t pos = kiocb->ki_pos;
	const struct cred *saved_cred;
	int ret;

	if (!file)
		return -ENOBUFS;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = __cachefiles_prepare_write(object, file, &pos, &len, true);
	cachefiles_end_secure(cache, saved_cred);
	if (ret < 0)
		return ret;

	trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len);
	ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
	if (!ret)
		ret = len;

	return ret;
}
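
/*
 * Sketch of the expected userspace usage: once the daemon has parsed the
 * struct cachefiles_read payload of a READ request, it fetches the missing
 * data and writes it into the cache through this anonymous fd.  "anon_fd"
 * and "fetch_from_server()" below are hypothetical names, not part of the
 * uapi:
 *
 *	char *buf = malloc(read->len);
 *	fetch_from_server(buf, read->off, read->len);  // fill buf somehow
 *	pwrite(anon_fd, buf, read->len, read->off);
 *
 * The pwrite() lands here via cachefiles_ondemand_fd_write_iter(); note
 * that FMODE_PWRITE is set on the fd in cachefiles_ondemand_get_fd().
 */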

static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
					    int whence)
{
	struct cachefiles_object *object = filp->private_data;
	struct file *file = object->file;

	if (!file)
		return -ENOBUFS;

	return vfs_llseek(file, pos, whence);
}

static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
					 unsigned long arg)
{
	struct cachefiles_object *object = filp->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_req *req;
	unsigned long id;

	if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
		return -EINVAL;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	id = arg;
	req = xa_erase(&cache->reqs, id);
	if (!req)
		return -EINVAL;

	trace_cachefiles_ondemand_cread(object, id);
	complete(&req->done);
	return 0;
}
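
/*
 * Sketch of the matching userspace step: after the data for a READ request
 * has been written, the daemon signals completion by passing the msg_id of
 * that request to this ioctl on the anonymous fd ("anon_fd" is a
 * hypothetical name):
 *
 *	ioctl(anon_fd, CACHEFILES_IOC_READ_COMPLETE, msg->msg_id);
 *
 * which erases the request from cache->reqs and completes req->done,
 * waking the kernel-side waiter in cachefiles_ondemand_send_req().
 */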

static const struct file_operations cachefiles_ondemand_fd_fops = {
	.owner		= THIS_MODULE,
	.release	= cachefiles_ondemand_fd_release,
	.write_iter	= cachefiles_ondemand_fd_write_iter,
	.llseek		= cachefiles_ondemand_fd_llseek,
	.unlocked_ioctl	= cachefiles_ondemand_fd_ioctl,
};

/*
 * OPEN request completion (copen)
 * - command: "copen <id>,<cache_size>"
 *   <cache_size> indicates the object size if >= 0, or an error code if
 *   negative
 */
int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
{
	struct cachefiles_req *req;
	struct fscache_cookie *cookie;
	char *pid, *psize;
	unsigned long id;
	long size;
	int ret;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	if (!*args) {
		pr_err("Empty id specified\n");
		return -EINVAL;
	}

	pid = args;
	psize = strchr(args, ',');
	if (!psize) {
		pr_err("Cache size is not specified\n");
		return -EINVAL;
	}

	*psize = 0;
	psize++;

	ret = kstrtoul(pid, 0, &id);
	if (ret)
		return ret;

	req = xa_erase(&cache->reqs, id);
	if (!req)
		return -EINVAL;

	/* fail OPEN request if copen format is invalid */
	ret = kstrtol(psize, 0, &size);
	if (ret) {
		req->error = ret;
		goto out;
	}

	/* fail OPEN request if daemon reports an error */
	if (size < 0) {
		if (!IS_ERR_VALUE(size)) {
			req->error = -EINVAL;
			ret = -EINVAL;
		} else {
			req->error = size;
			ret = 0;
		}
		goto out;
	}

	cookie = req->object->cookie;
	cookie->object_size = size;
	if (size)
		clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	else
		set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	trace_cachefiles_ondemand_copen(req->object, id, size);

	cachefiles_ondemand_set_object_open(req->object);
	wake_up_all(&cache->daemon_pollwq);

out:
	complete(&req->done);
	return ret;
}
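
/*
 * Sketch of the userspace side of copen: the daemon answers an OPEN
 * request by writing the command to the /dev/cachefiles fd it bound the
 * cache with ("devfd" and the values are hypothetical):
 *
 *	dprintf(devfd, "copen %u,%lld", msg->msg_id, object_size);
 *
 * or, to fail the OPEN, a negative error code in place of the size:
 *
 *	dprintf(devfd, "copen %u,%d", msg->msg_id, -EIO);
 */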

int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args)
{
	struct cachefiles_req *req;

	XA_STATE(xas, &cache->reqs, 0);

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	/*
	 * Reset the requests to the CACHEFILES_REQ_NEW state, so that
	 * requests that had been processed only halfway before the user
	 * daemon crashed can be reprocessed after recovery.
	 */
	xas_lock(&xas);
	xas_for_each(&xas, req, ULONG_MAX)
		xas_set_mark(&xas, CACHEFILES_REQ_NEW);
	xas_unlock(&xas);

	wake_up_all(&cache->daemon_pollwq);
	return 0;
}
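
/*
 * Sketch of a daemon-side recovery, assuming a restarted daemon that has
 * reopened /dev/cachefiles as "devfd":
 *
 *	write(devfd, "restore", strlen("restore"));
 *
 * after which poll() on devfd reports the re-marked requests as readable
 * again and they can be reprocessed as if newly issued.
 */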

static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct cachefiles_open *load;
	struct file *file;
	u32 object_id;
	int ret, fd;

	object = cachefiles_grab_object(req->object,
			cachefiles_obj_get_ondemand_fd);
	cache = object->volume->cache;

	ret = xa_alloc_cyclic(&cache->ondemand_ids, &object_id, NULL,
			      XA_LIMIT(1, INT_MAX),
			      &cache->ondemand_id_next, GFP_KERNEL);
	if (ret < 0)
		goto err;

	fd = get_unused_fd_flags(O_WRONLY);
	if (fd < 0) {
		ret = fd;
		goto err_free_id;
	}

	file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops,
				  object, O_WRONLY);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_put_fd;
	}

	file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
	fd_install(fd, file);

	load = (void *)req->msg.data;
	load->fd = fd;
	object->ondemand->ondemand_id = object_id;

	cachefiles_get_unbind_pincount(cache);
	trace_cachefiles_ondemand_open(object, &req->msg, load);
	return 0;

err_put_fd:
	put_unused_fd(fd);
err_free_id:
	xa_erase(&cache->ondemand_ids, object_id);
err:
	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
	return ret;
}

static void ondemand_object_worker(struct work_struct *work)
{
	struct cachefiles_ondemand_info *info =
		container_of(work, struct cachefiles_ondemand_info, ondemand_work);

	cachefiles_ondemand_init_object(info->object);
}

/*
 * If there are any in-flight or subsequent READ requests on the closed
 * object, reopen it.
 * Skip READ requests whose related object is being reopened.
 */
static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas,
							      unsigned long xa_max)
{
	struct cachefiles_req *req;
	struct cachefiles_object *object;
	struct cachefiles_ondemand_info *info;

	xas_for_each_marked(xas, req, xa_max, CACHEFILES_REQ_NEW) {
		if (req->msg.opcode != CACHEFILES_OP_READ)
			return req;
		object = req->object;
		info = object->ondemand;
		if (cachefiles_ondemand_object_is_close(object)) {
			cachefiles_ondemand_set_object_reopening(object);
			queue_work(fscache_wq, &info->ondemand_work);
			continue;
		}
		if (cachefiles_ondemand_object_is_reopening(object))
			continue;
		return req;
	}
	return NULL;
}

ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
					char __user *_buffer, size_t buflen)
{
	struct cachefiles_req *req;
	struct cachefiles_msg *msg;
	unsigned long id = 0;
	size_t n;
	int ret = 0;
	XA_STATE(xas, &cache->reqs, cache->req_id_next);

	xa_lock(&cache->reqs);
	/*
	 * Search cyclically for a request that has not yet been processed,
	 * to prevent requests from being processed repeatedly and to keep
	 * request distribution fair.
	 */
	req = cachefiles_ondemand_select_req(&xas, ULONG_MAX);
	if (!req && cache->req_id_next > 0) {
		xas_set(&xas, 0);
		req = cachefiles_ondemand_select_req(&xas, cache->req_id_next - 1);
	}
	if (!req) {
		xa_unlock(&cache->reqs);
		return 0;
	}

	msg = &req->msg;
	n = msg->len;

	if (n > buflen) {
		xa_unlock(&cache->reqs);
		return -EMSGSIZE;
	}

	xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
	cache->req_id_next = xas.xa_index + 1;
	xa_unlock(&cache->reqs);

	id = xas.xa_index;

	if (msg->opcode == CACHEFILES_OP_OPEN) {
		ret = cachefiles_ondemand_get_fd(req);
		if (ret) {
			cachefiles_ondemand_set_object_close(req->object);
			goto error;
		}
	}

	msg->msg_id = id;
	msg->object_id = req->object->ondemand->ondemand_id;

	if (copy_to_user(_buffer, msg, n) != 0) {
		ret = -EFAULT;
		goto err_put_fd;
	}

	/* CLOSE request has no reply */
	if (msg->opcode == CACHEFILES_OP_CLOSE) {
		xa_erase(&cache->reqs, id);
		complete(&req->done);
	}

	return n;

err_put_fd:
	if (msg->opcode == CACHEFILES_OP_OPEN)
		close_fd(((struct cachefiles_open *)msg->data)->fd);
error:
	xa_erase(&cache->reqs, id);
	req->error = ret;
	complete(&req->done);
	return ret;
}
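
/*
 * Sketch of a minimal daemon read loop consuming this interface, using
 * the uapi definitions from <linux/cachefiles.h> (the handle_*() helpers
 * are hypothetical):
 *
 *	char buf[CACHEFILES_MSG_MAX_SIZE];
 *	struct cachefiles_msg *msg = (void *)buf;
 *
 *	while (read(devfd, buf, sizeof(buf)) > 0) {
 *		switch (msg->opcode) {
 *		case CACHEFILES_OP_OPEN:
 *			handle_open(msg);   // payload: struct cachefiles_open
 *			break;
 *		case CACHEFILES_OP_CLOSE:
 *			handle_close(msg);  // no reply expected
 *			break;
 *		case CACHEFILES_OP_READ:
 *			handle_read(msg);   // payload: struct cachefiles_read
 *			break;
 *		}
 *	}
 */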

typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);

static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
					enum cachefiles_opcode opcode,
					size_t data_len,
					init_req_fn init_req,
					void *private)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_req *req = NULL;
	XA_STATE(xas, &cache->reqs, 0);
	int ret;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return 0;

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		ret = -EIO;
		goto out;
	}

	req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out;
	}

	req->object = object;
	init_completion(&req->done);
	req->msg.opcode = opcode;
	req->msg.len = sizeof(struct cachefiles_msg) + data_len;

	ret = init_req(req, private);
	if (ret)
		goto out;

	do {
		/*
		 * Stop enqueuing the request when the daemon is dying. The
		 * following two operations need to be atomic as a whole:
		 *   1) check the cache state, and
		 *   2) enqueue the request if the cache is alive.
		 * Otherwise the request may be enqueued after the xarray has
		 * been flushed, leaving an orphan request that is never
		 * completed.
		 *
		 * CPU 1			CPU 2
		 * =====			=====
		 *				test CACHEFILES_DEAD bit
		 * set CACHEFILES_DEAD bit
		 * flush requests in the xarray
		 *				enqueue the request
		 */
		xas_lock(&xas);

		if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
			xas_unlock(&xas);
			ret = -EIO;
			goto out;
		}

		/* coupled with the barrier in cachefiles_flush_reqs() */
		smp_mb();

		if (opcode == CACHEFILES_OP_CLOSE &&
		    !cachefiles_ondemand_object_is_open(object)) {
			WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
			xas_unlock(&xas);
			ret = -EIO;
			goto out;
		}

		xas.xa_index = 0;
		xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART)
			xas_set_err(&xas, -EBUSY);
		xas_store(&xas, req);
		xas_clear_mark(&xas, XA_FREE_MARK);
		xas_set_mark(&xas, CACHEFILES_REQ_NEW);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	ret = xas_error(&xas);
	if (ret)
		goto out;

	wake_up_all(&cache->daemon_pollwq);
	wait_for_completion(&req->done);
	ret = req->error;
	kfree(req);
	return ret;
out:
	/*
	 * Reset the object to the closed state in the error handling path.
	 * If an error occurs after the anonymous fd has been created,
	 * cachefiles_ondemand_fd_release() will set the object to the
	 * closed state instead.
	 */
	if (opcode == CACHEFILES_OP_OPEN)
		cachefiles_ondemand_set_object_close(object);
	kfree(req);
	return ret;
}

static int cachefiles_ondemand_init_open_req(struct cachefiles_req *req,
					     void *private)
{
	struct cachefiles_object *object = req->object;
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_volume *volume = object->volume->vcookie;
	struct cachefiles_open *load = (void *)req->msg.data;
	size_t volume_key_size, cookie_key_size;
	void *volume_key, *cookie_key;

	/*
	 * The volume key is a NUL-terminated string, with key[0] storing the
	 * strlen() of the string and key[1...] the content of the string
	 * (excluding the trailing '\0').
	 */
	volume_key_size = volume->key[0] + 1;
	volume_key = volume->key + 1;

	/* The cookie key is binary data, which is netfs-specific. */
	cookie_key_size = cookie->key_len;
	cookie_key = fscache_get_key(cookie);

	if (!(object->cookie->advice & FSCACHE_ADV_WANT_CACHE_SIZE)) {
		pr_err("WANT_CACHE_SIZE is needed for on-demand mode\n");
		return -EINVAL;
	}

	load->volume_key_size = volume_key_size;
	load->cookie_key_size = cookie_key_size;
	memcpy(load->data, volume_key, volume_key_size);
	memcpy(load->data + volume_key_size, cookie_key, cookie_key_size);

	return 0;
}
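
/*
 * Sketch of how the daemon can unpack the payload laid out above from an
 * OPEN message ("msg" is assumed to point at the buffer returned by
 * reading /dev/cachefiles):
 *
 *	struct cachefiles_open *load = (void *)msg->data;
 *	char *volume_key = (char *)load->data;
 *	char *cookie_key = (char *)load->data + load->volume_key_size;
 *	int anon_fd = load->fd;
 *
 * volume_key is a printable NUL-terminated string (the length byte in
 * volume->key[0] is not copied), while cookie_key is opaque binary data
 * of cookie_key_size bytes.
 */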

static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
					      void *private)
{
	struct cachefiles_object *object = req->object;

	if (!cachefiles_ondemand_object_is_open(object))
		return -ENOENT;

	trace_cachefiles_ondemand_close(object, &req->msg);
	return 0;
}

struct cachefiles_read_ctx {
	loff_t off;
	size_t len;
};

static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
					     void *private)
{
	struct cachefiles_object *object = req->object;
	struct cachefiles_read *load = (void *)req->msg.data;
	struct cachefiles_read_ctx *read_ctx = private;

	load->off = read_ctx->off;
	load->len = read_ctx->len;
	trace_cachefiles_ondemand_read(object, &req->msg, load);
	return 0;
}

int cachefiles_ondemand_init_object(struct cachefiles_object *object)
{
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_volume *volume = object->volume->vcookie;
	size_t volume_key_size, cookie_key_size, data_len;

	/*
	 * CacheFiles will first check the cache file under the root cache
	 * directory. If the coherency check fails, it will fall back to
	 * creating a new tmpfile as the cache file. Reuse the previously
	 * allocated object ID, if any.
	 */
	if (cachefiles_ondemand_object_is_open(object))
		return 0;

	volume_key_size = volume->key[0] + 1;
	cookie_key_size = cookie->key_len;
	data_len = sizeof(struct cachefiles_open) +
		   volume_key_size + cookie_key_size;

	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_OPEN,
			data_len, cachefiles_ondemand_init_open_req, NULL);
}

void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
{
	cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
			cachefiles_ondemand_init_close_req, NULL);
}

int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
				struct cachefiles_volume *volume)
{
	if (!cachefiles_in_ondemand_mode(volume->cache))
		return 0;

	object->ondemand = kzalloc(sizeof(struct cachefiles_ondemand_info),
					GFP_KERNEL);
	if (!object->ondemand)
		return -ENOMEM;

	object->ondemand->object = object;
	INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
	return 0;
}

void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *object)
{
	kfree(object->ondemand);
	object->ondemand = NULL;
}

int cachefiles_ondemand_read(struct cachefiles_object *object,
			     loff_t pos, size_t len)
{
	struct cachefiles_read_ctx read_ctx = {pos, len};

	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_READ,
			sizeof(struct cachefiles_read),
			cachefiles_ondemand_init_read_req, &read_ctx);
}
597