1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * virtio-fs: Virtio Filesystem
4  * Copyright (C) 2018 Red Hat, Inc.
5  */
6 
7 #include <linux/fs.h>
8 #include <linux/dax.h>
9 #include <linux/pci.h>
10 #include <linux/interrupt.h>
11 #include <linux/group_cpus.h>
12 #include <linux/pfn_t.h>
13 #include <linux/memremap.h>
14 #include <linux/module.h>
15 #include <linux/virtio.h>
16 #include <linux/virtio_fs.h>
17 #include <linux/delay.h>
18 #include <linux/fs_context.h>
19 #include <linux/fs_parser.h>
20 #include <linux/highmem.h>
21 #include <linux/cleanup.h>
22 #include <linux/uio.h>
23 #include "fuse_i.h"
24 
25 /* Used to help calculate the FUSE connection's max_pages limit for a request's
26  * size. Parts of the struct fuse_req are sliced into scatter-gather lists in
27  * addition to the pages used, so this can help account for that overhead.
28  */
29 #define FUSE_HEADER_OVERHEAD    4
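
/*
 * Illustrative arithmetic (the virtqueue size is device-dependent; 128 is
 * just an example): virtio_fs_get_tree() clamps fc->max_pages_limit to
 * virtqueue_size - FUSE_HEADER_OVERHEAD, so a 128-descriptor request queue
 * leaves at most 128 - 4 = 124 descriptors for data pages. The remaining 4
 * cover fuse_in_header, fuse_out_header and the packed in/out argument
 * buffers built by virtio_fs_enqueue_req().
 */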
30 
31 /* List of virtio-fs device instances and a lock for the list. Also provides
32  * mutual exclusion between the device removal and mount paths
33  */
34 static DEFINE_MUTEX(virtio_fs_mutex);
35 static LIST_HEAD(virtio_fs_instances);
36 
37 /* The /sys/fs/virtiofs/ kset */
38 static struct kset *virtio_fs_kset;
39 
40 enum {
41 	VQ_HIPRIO,
42 	VQ_REQUEST
43 };
44 
45 #define VQ_NAME_LEN	24
46 
47 /* Per-virtqueue state */
48 struct virtio_fs_vq {
49 	spinlock_t lock;
50 	struct virtqueue *vq;     /* protected by ->lock */
51 	struct work_struct done_work;
52 	struct list_head queued_reqs;
53 	struct list_head end_reqs;	/* End these requests */
54 	struct work_struct dispatch_work;
55 	struct fuse_dev *fud;
56 	bool connected;
57 	long in_flight;
58 	struct completion in_flight_zero; /* No inflight requests */
59 	struct kobject *kobj;
60 	char name[VQ_NAME_LEN];
61 } ____cacheline_aligned_in_smp;
62 
63 /* A virtio-fs device instance */
64 struct virtio_fs {
65 	struct kobject kobj;
66 	struct kobject *mqs_kobj;
67 	struct list_head list;    /* on virtio_fs_instances */
68 	char *tag;
69 	struct virtio_fs_vq *vqs;
70 	unsigned int nvqs;               /* number of virtqueues */
71 	unsigned int num_request_queues; /* number of request queues */
72 	struct dax_device *dax_dev;
73 
74 	unsigned int *mq_map; /* index = cpu id, value = request vq id */
75 
76 	/* DAX memory window where file contents are mapped */
77 	void *window_kaddr;
78 	phys_addr_t window_phys_addr;
79 	size_t window_len;
80 };
81 
82 struct virtio_fs_forget_req {
83 	struct fuse_in_header ih;
84 	struct fuse_forget_in arg;
85 };
86 
87 struct virtio_fs_forget {
88 	/* This request can be temporarily queued on the virtqueue */
89 	struct list_head list;
90 	struct virtio_fs_forget_req req;
91 };
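
/*
 * Wire-layout sketch: a FORGET is sent as a single device-readable buffer
 * covering struct virtio_fs_forget_req (fuse_in_header immediately followed
 * by fuse_forget_in). send_forget_request() maps &forget->req with
 * sg_init_one() and queues it with virtqueue_add_outbuf(); no device-writable
 * buffer is added because FORGET has no reply.
 */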
92 
93 struct virtio_fs_req_work {
94 	struct fuse_req *req;
95 	struct virtio_fs_vq *fsvq;
96 	struct work_struct done_work;
97 };
98 
99 static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
100 				 struct fuse_req *req, bool in_flight,
101 				 gfp_t gfp);
102 
103 static const struct constant_table dax_param_enums[] = {
104 	{"always",	FUSE_DAX_ALWAYS },
105 	{"never",	FUSE_DAX_NEVER },
106 	{"inode",	FUSE_DAX_INODE_USER },
107 	{}
108 };
109 
110 enum {
111 	OPT_DAX,
112 	OPT_DAX_ENUM,
113 };
114 
115 static const struct fs_parameter_spec virtio_fs_parameters[] = {
116 	fsparam_flag("dax", OPT_DAX),
117 	fsparam_enum("dax", OPT_DAX_ENUM, dax_param_enums),
118 	{}
119 };
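
/*
 * Example usage (tag and mount point are illustrative): the device tag acts
 * as the mount source and "dax" is the only driver-specific option, e.g.
 *
 *   mount -t virtiofs myfs /mnt
 *   mount -t virtiofs -o dax=inode myfs /mnt
 *
 * A bare "-o dax" (the flag form, OPT_DAX) behaves like dax=always.
 */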
120 
121 static int virtio_fs_parse_param(struct fs_context *fsc,
122 				 struct fs_parameter *param)
123 {
124 	struct fs_parse_result result;
125 	struct fuse_fs_context *ctx = fsc->fs_private;
126 	int opt;
127 
128 	opt = fs_parse(fsc, virtio_fs_parameters, param, &result);
129 	if (opt < 0)
130 		return opt;
131 
132 	switch (opt) {
133 	case OPT_DAX:
134 		ctx->dax_mode = FUSE_DAX_ALWAYS;
135 		break;
136 	case OPT_DAX_ENUM:
137 		ctx->dax_mode = result.uint_32;
138 		break;
139 	default:
140 		return -EINVAL;
141 	}
142 
143 	return 0;
144 }
145 
146 static void virtio_fs_free_fsc(struct fs_context *fsc)
147 {
148 	struct fuse_fs_context *ctx = fsc->fs_private;
149 
150 	kfree(ctx);
151 }
152 
153 static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
154 {
155 	struct virtio_fs *fs = vq->vdev->priv;
156 
157 	return &fs->vqs[vq->index];
158 }
159 
160 /* Should be called with fsvq->lock held. */
161 static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
162 {
163 	fsvq->in_flight++;
164 }
165 
166 /* Should be called with fsvq->lock held. */
167 static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
168 {
169 	WARN_ON(fsvq->in_flight <= 0);
170 	fsvq->in_flight--;
171 	if (!fsvq->in_flight)
172 		complete(&fsvq->in_flight_zero);
173 }
174 
175 static ssize_t tag_show(struct kobject *kobj,
176 		struct kobj_attribute *attr, char *buf)
177 {
178 	struct virtio_fs *fs = container_of(kobj, struct virtio_fs, kobj);
179 
180 	return sysfs_emit(buf, "%s\n", fs->tag);
181 }
182 
183 static struct kobj_attribute virtio_fs_tag_attr = __ATTR_RO(tag);
184 
185 static struct attribute *virtio_fs_attrs[] = {
186 	&virtio_fs_tag_attr.attr,
187 	NULL
188 };
189 ATTRIBUTE_GROUPS(virtio_fs);
190 
191 static void virtio_fs_ktype_release(struct kobject *kobj)
192 {
193 	struct virtio_fs *vfs = container_of(kobj, struct virtio_fs, kobj);
194 
195 	kfree(vfs->mq_map);
196 	kfree(vfs->vqs);
197 	kfree(vfs);
198 }
199 
200 static const struct kobj_type virtio_fs_ktype = {
201 	.release = virtio_fs_ktype_release,
202 	.sysfs_ops = &kobj_sysfs_ops,
203 	.default_groups = virtio_fs_groups,
204 };
205 
206 static struct virtio_fs_vq *virtio_fs_kobj_to_vq(struct virtio_fs *fs,
207 		struct kobject *kobj)
208 {
209 	int i;
210 
211 	for (i = 0; i < fs->nvqs; i++) {
212 		if (kobj == fs->vqs[i].kobj)
213 			return &fs->vqs[i];
214 	}
215 	return NULL;
216 }
217 
218 static ssize_t name_show(struct kobject *kobj,
219 		struct kobj_attribute *attr, char *buf)
220 {
221 	struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj);
222 	struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj);
223 
224 	if (!fsvq)
225 		return -EINVAL;
226 	return sysfs_emit(buf, "%s\n", fsvq->name);
227 }
228 
229 static struct kobj_attribute virtio_fs_vq_name_attr = __ATTR_RO(name);
230 
231 static ssize_t cpu_list_show(struct kobject *kobj,
232 		struct kobj_attribute *attr, char *buf)
233 {
234 	struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj);
235 	struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj);
236 	unsigned int cpu, qid;
237 	const size_t size = PAGE_SIZE - 1;
238 	bool first = true;
239 	int ret = 0, pos = 0;
240 
241 	if (!fsvq)
242 		return -EINVAL;
243 
244 	qid = fsvq->vq->index;
245 	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
246 		if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid)) {
247 			if (first)
248 				ret = snprintf(buf + pos, size - pos, "%u", cpu);
249 			else
250 				ret = snprintf(buf + pos, size - pos, ", %u", cpu);
251 
252 			if (ret >= size - pos)
253 				break;
254 			first = false;
255 			pos += ret;
256 		}
257 	}
258 	ret = snprintf(buf + pos, size + 1 - pos, "\n");
259 	return pos + ret;
260 }
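
/*
 * Illustrative read of this attribute (paths and values are made up): for
 * device index 0, queue 1 (the first request queue),
 *
 *   $ cat /sys/fs/virtiofs/0/mqs/1/cpu_list
 *   0, 2
 *
 * would mean CPUs 0 and 2 are mapped to that queue. The hiprio queue
 * (qid < VQ_REQUEST) always lists every CPU.
 */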
261 
262 static struct kobj_attribute virtio_fs_vq_cpu_list_attr = __ATTR_RO(cpu_list);
263 
264 static struct attribute *virtio_fs_vq_attrs[] = {
265 	&virtio_fs_vq_name_attr.attr,
266 	&virtio_fs_vq_cpu_list_attr.attr,
267 	NULL
268 };
269 
270 static struct attribute_group virtio_fs_vq_attr_group = {
271 	.attrs = virtio_fs_vq_attrs,
272 };
273 
274 /* Make sure virtiofs_mutex is held */
275 static void virtio_fs_put_locked(struct virtio_fs *fs)
276 {
277 	lockdep_assert_held(&virtio_fs_mutex);
278 
279 	kobject_put(&fs->kobj);
280 }
281 
282 static void virtio_fs_put(struct virtio_fs *fs)
283 {
284 	mutex_lock(&virtio_fs_mutex);
285 	virtio_fs_put_locked(fs);
286 	mutex_unlock(&virtio_fs_mutex);
287 }
288 
289 static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
290 {
291 	struct virtio_fs *vfs = fiq->priv;
292 
293 	virtio_fs_put(vfs);
294 }
295 
296 static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
297 {
298 	WARN_ON(fsvq->in_flight < 0);
299 
300 	/* Wait for in flight requests to finish. */
301 	spin_lock(&fsvq->lock);
302 	if (fsvq->in_flight) {
303 		/* We are holding virtio_fs_mutex. There should not be any
304 		 * waiters waiting for completion.
305 		 */
306 		reinit_completion(&fsvq->in_flight_zero);
307 		spin_unlock(&fsvq->lock);
308 		wait_for_completion(&fsvq->in_flight_zero);
309 	} else {
310 		spin_unlock(&fsvq->lock);
311 	}
312 
313 	flush_work(&fsvq->done_work);
314 	flush_work(&fsvq->dispatch_work);
315 }
316 
317 static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
318 {
319 	struct virtio_fs_vq *fsvq;
320 	int i;
321 
322 	for (i = 0; i < fs->nvqs; i++) {
323 		fsvq = &fs->vqs[i];
324 		virtio_fs_drain_queue(fsvq);
325 	}
326 }
327 
328 static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
329 {
330 	/* Provides mutual exclusion between ->remove and ->kill_sb
330 	 * paths. We don't want both of these draining queues at the
332 	 * same time. Current completion logic reinits completion
333 	 * and that means there should not be any other thread
334 	 * doing reinit or waiting for completion already.
335 	 */
336 	mutex_lock(&virtio_fs_mutex);
337 	virtio_fs_drain_all_queues_locked(fs);
338 	mutex_unlock(&virtio_fs_mutex);
339 }
340 
341 static void virtio_fs_start_all_queues(struct virtio_fs *fs)
342 {
343 	struct virtio_fs_vq *fsvq;
344 	int i;
345 
346 	for (i = 0; i < fs->nvqs; i++) {
347 		fsvq = &fs->vqs[i];
348 		spin_lock(&fsvq->lock);
349 		fsvq->connected = true;
350 		spin_unlock(&fsvq->lock);
351 	}
352 }
353 
354 static void virtio_fs_delete_queues_sysfs(struct virtio_fs *fs)
355 {
356 	struct virtio_fs_vq *fsvq;
357 	int i;
358 
359 	for (i = 0; i < fs->nvqs; i++) {
360 		fsvq = &fs->vqs[i];
361 		kobject_put(fsvq->kobj);
362 	}
363 }
364 
365 static int virtio_fs_add_queues_sysfs(struct virtio_fs *fs)
366 {
367 	struct virtio_fs_vq *fsvq;
368 	char buff[12];
369 	int i, j, ret;
370 
371 	for (i = 0; i < fs->nvqs; i++) {
372 		fsvq = &fs->vqs[i];
373 
374 		sprintf(buff, "%d", i);
375 		fsvq->kobj = kobject_create_and_add(buff, fs->mqs_kobj);
376 		if (!fsvq->kobj) {
377 			ret = -ENOMEM;
378 			goto out_del;
379 		}
380 
381 		ret = sysfs_create_group(fsvq->kobj, &virtio_fs_vq_attr_group);
382 		if (ret) {
383 			kobject_put(fsvq->kobj);
384 			goto out_del;
385 		}
386 	}
387 
388 	return 0;
389 
390 out_del:
391 	for (j = 0; j < i; j++) {
392 		fsvq = &fs->vqs[j];
393 		kobject_put(fsvq->kobj);
394 	}
395 	return ret;
396 }
397 
398 /* Add a new instance to the list or return -EEXIST if tag name exists */
399 static int virtio_fs_add_instance(struct virtio_device *vdev,
400 				  struct virtio_fs *fs)
401 {
402 	struct virtio_fs *fs2;
403 	int ret;
404 
405 	mutex_lock(&virtio_fs_mutex);
406 
407 	list_for_each_entry(fs2, &virtio_fs_instances, list) {
408 		if (strcmp(fs->tag, fs2->tag) == 0) {
409 			mutex_unlock(&virtio_fs_mutex);
410 			return -EEXIST;
411 		}
412 	}
413 
414 	/* Use the virtio_device's index as a unique identifier, there is no
415 	 * need to allocate our own identifiers because the virtio_fs instance
416 	 * is only visible to userspace as long as the underlying virtio_device
417 	 * exists.
418 	 */
419 	fs->kobj.kset = virtio_fs_kset;
420 	ret = kobject_add(&fs->kobj, NULL, "%d", vdev->index);
421 	if (ret < 0)
422 		goto out_unlock;
423 
424 	fs->mqs_kobj = kobject_create_and_add("mqs", &fs->kobj);
425 	if (!fs->mqs_kobj) {
426 		ret = -ENOMEM;
427 		goto out_del;
428 	}
429 
430 	ret = sysfs_create_link(&fs->kobj, &vdev->dev.kobj, "device");
431 	if (ret < 0)
432 		goto out_put;
433 
434 	ret = virtio_fs_add_queues_sysfs(fs);
435 	if (ret)
436 		goto out_remove;
437 
438 	list_add_tail(&fs->list, &virtio_fs_instances);
439 
440 	mutex_unlock(&virtio_fs_mutex);
441 
442 	kobject_uevent(&fs->kobj, KOBJ_ADD);
443 
444 	return 0;
445 
446 out_remove:
447 	sysfs_remove_link(&fs->kobj, "device");
448 out_put:
449 	kobject_put(fs->mqs_kobj);
450 out_del:
451 	kobject_del(&fs->kobj);
452 out_unlock:
453 	mutex_unlock(&virtio_fs_mutex);
454 	return ret;
455 }
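
/*
 * Resulting sysfs layout sketch (directory names depend on the virtio device
 * index and queue count):
 *
 *   /sys/fs/virtiofs/<index>/tag
 *   /sys/fs/virtiofs/<index>/device   (symlink to the virtio device)
 *   /sys/fs/virtiofs/<index>/mqs/<qid>/{name,cpu_list}
 *
 * A KOBJ_ADD uevent carrying a TAG=<tag> property is emitted last, once
 * everything is in place (see virtio_fs_uevent()).
 */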
456 
457 /* Return the virtio_fs with a given tag, or NULL */
458 static struct virtio_fs *virtio_fs_find_instance(const char *tag)
459 {
460 	struct virtio_fs *fs;
461 
462 	mutex_lock(&virtio_fs_mutex);
463 
464 	list_for_each_entry(fs, &virtio_fs_instances, list) {
465 		if (strcmp(fs->tag, tag) == 0) {
466 			kobject_get(&fs->kobj);
467 			goto found;
468 		}
469 	}
470 
471 	fs = NULL; /* not found */
472 
473 found:
474 	mutex_unlock(&virtio_fs_mutex);
475 
476 	return fs;
477 }
478 
479 static void virtio_fs_free_devs(struct virtio_fs *fs)
480 {
481 	unsigned int i;
482 
483 	for (i = 0; i < fs->nvqs; i++) {
484 		struct virtio_fs_vq *fsvq = &fs->vqs[i];
485 
486 		if (!fsvq->fud)
487 			continue;
488 
489 		fuse_dev_free(fsvq->fud);
490 		fsvq->fud = NULL;
491 	}
492 }
493 
494 /* Read filesystem name from virtio config into fs->tag (devm-allocated, freed with the device). */
495 static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
496 {
497 	char tag_buf[sizeof_field(struct virtio_fs_config, tag)];
498 	char *end;
499 	size_t len;
500 
501 	virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag),
502 			   &tag_buf, sizeof(tag_buf));
503 	end = memchr(tag_buf, '\0', sizeof(tag_buf));
504 	if (end == tag_buf)
505 		return -EINVAL; /* empty tag */
506 	if (!end)
507 		end = &tag_buf[sizeof(tag_buf)];
508 
509 	len = end - tag_buf;
510 	fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL);
511 	if (!fs->tag)
512 		return -ENOMEM;
513 	memcpy(fs->tag, tag_buf, len);
514 	fs->tag[len] = '\0';
515 
516 	/* While the VIRTIO specification allows any character, newlines are
517 	 * awkward on mount(8) command-lines and cause problems in the sysfs
518 	 * "tag" attr and uevent TAG= properties. Forbid them.
519 	 */
520 	if (strchr(fs->tag, '\n')) {
521 		dev_dbg(&vdev->dev, "refusing virtiofs tag with newline character\n");
522 		return -EINVAL;
523 	}
524 
525 	dev_info(&vdev->dev, "discovered new tag: %s\n", fs->tag);
526 	return 0;
527 }
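
/*
 * Example (hypothetical config contents): a device exposing the 4-byte tag
 * "myfs" pads the rest of the 36-byte config field with NULs, so memchr()
 * finds the terminator and len == 4. A maximum-length tag fills the field
 * with no NUL at all, which is why the !end case points end one past the
 * buffer instead of failing.
 */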
528 
529 /* Work function for hiprio completion */
530 static void virtio_fs_hiprio_done_work(struct work_struct *work)
531 {
532 	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
533 						 done_work);
534 	struct virtqueue *vq = fsvq->vq;
535 
536 	/* Free completed FUSE_FORGET requests */
537 	spin_lock(&fsvq->lock);
538 	do {
539 		unsigned int len;
540 		void *req;
541 
542 		virtqueue_disable_cb(vq);
543 
544 		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
545 			kfree(req);
546 			dec_in_flight_req(fsvq);
547 		}
548 	} while (!virtqueue_enable_cb(vq));
549 
550 	if (!list_empty(&fsvq->queued_reqs))
551 		schedule_work(&fsvq->dispatch_work);
552 
553 	spin_unlock(&fsvq->lock);
554 }
555 
556 static void virtio_fs_request_dispatch_work(struct work_struct *work)
557 {
558 	struct fuse_req *req;
559 	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
560 						 dispatch_work);
561 	int ret;
562 
563 	pr_debug("virtio-fs: worker %s called.\n", __func__);
564 	while (1) {
565 		spin_lock(&fsvq->lock);
566 		req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
567 					       list);
568 		if (!req) {
569 			spin_unlock(&fsvq->lock);
570 			break;
571 		}
572 
573 		list_del_init(&req->list);
574 		spin_unlock(&fsvq->lock);
575 		fuse_request_end(req);
576 	}
577 
578 	/* Dispatch pending requests */
579 	while (1) {
580 		unsigned int flags;
581 
582 		spin_lock(&fsvq->lock);
583 		req = list_first_entry_or_null(&fsvq->queued_reqs,
584 					       struct fuse_req, list);
585 		if (!req) {
586 			spin_unlock(&fsvq->lock);
587 			return;
588 		}
589 		list_del_init(&req->list);
590 		spin_unlock(&fsvq->lock);
591 
592 		flags = memalloc_nofs_save();
593 		ret = virtio_fs_enqueue_req(fsvq, req, true, GFP_KERNEL);
594 		memalloc_nofs_restore(flags);
595 		if (ret < 0) {
596 			if (ret == -ENOSPC) {
597 				spin_lock(&fsvq->lock);
598 				list_add_tail(&req->list, &fsvq->queued_reqs);
599 				spin_unlock(&fsvq->lock);
600 				return;
601 			}
602 			req->out.h.error = ret;
603 			spin_lock(&fsvq->lock);
604 			dec_in_flight_req(fsvq);
605 			spin_unlock(&fsvq->lock);
606 			pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
607 			       ret);
608 			fuse_request_end(req);
609 		}
610 	}
611 }
612 
613 /*
614  * Returns 1 if queue is full and sender should wait a bit before sending
615  * next request, 0 otherwise.
616  */
617 static int send_forget_request(struct virtio_fs_vq *fsvq,
618 			       struct virtio_fs_forget *forget,
619 			       bool in_flight)
620 {
621 	struct scatterlist sg;
622 	struct virtqueue *vq;
623 	int ret = 0;
624 	bool notify;
625 	struct virtio_fs_forget_req *req = &forget->req;
626 
627 	spin_lock(&fsvq->lock);
628 	if (!fsvq->connected) {
629 		if (in_flight)
630 			dec_in_flight_req(fsvq);
631 		kfree(forget);
632 		goto out;
633 	}
634 
635 	sg_init_one(&sg, req, sizeof(*req));
636 	vq = fsvq->vq;
637 	dev_dbg(&vq->vdev->dev, "%s\n", __func__);
638 
639 	ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC);
640 	if (ret < 0) {
641 		if (ret == -ENOSPC) {
642 			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
643 				 ret);
644 			list_add_tail(&forget->list, &fsvq->queued_reqs);
645 			if (!in_flight)
646 				inc_in_flight_req(fsvq);
647 			/* Queue is full */
648 			ret = 1;
649 		} else {
650 			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
651 				 ret);
652 			kfree(forget);
653 			if (in_flight)
654 				dec_in_flight_req(fsvq);
655 		}
656 		goto out;
657 	}
658 
659 	if (!in_flight)
660 		inc_in_flight_req(fsvq);
661 	notify = virtqueue_kick_prepare(vq);
662 	spin_unlock(&fsvq->lock);
663 
664 	if (notify)
665 		virtqueue_notify(vq);
666 	return ret;
667 out:
668 	spin_unlock(&fsvq->lock);
669 	return ret;
670 }
671 
672 static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
673 {
674 	struct virtio_fs_forget *forget;
675 	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
676 						 dispatch_work);
677 	pr_debug("virtio-fs: worker %s called.\n", __func__);
678 	while (1) {
679 		spin_lock(&fsvq->lock);
680 		forget = list_first_entry_or_null(&fsvq->queued_reqs,
681 					struct virtio_fs_forget, list);
682 		if (!forget) {
683 			spin_unlock(&fsvq->lock);
684 			return;
685 		}
686 
687 		list_del(&forget->list);
688 		spin_unlock(&fsvq->lock);
689 		if (send_forget_request(fsvq, forget, true))
690 			return;
691 	}
692 }
693 
694 /* Allocate and copy args into req->argbuf */
695 static int copy_args_to_argbuf(struct fuse_req *req, gfp_t gfp)
696 {
697 	struct fuse_args *args = req->args;
698 	unsigned int offset = 0;
699 	unsigned int num_in;
700 	unsigned int num_out;
701 	unsigned int len;
702 	unsigned int i;
703 
704 	num_in = args->in_numargs - args->in_pages;
705 	num_out = args->out_numargs - args->out_pages;
706 	len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) +
707 	      fuse_len_args(num_out, args->out_args);
708 
709 	req->argbuf = kmalloc(len, gfp);
710 	if (!req->argbuf)
711 		return -ENOMEM;
712 
713 	for (i = 0; i < num_in; i++) {
714 		memcpy(req->argbuf + offset,
715 		       args->in_args[i].value,
716 		       args->in_args[i].size);
717 		offset += args->in_args[i].size;
718 	}
719 
720 	return 0;
721 }
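
/*
 * Buffer-layout sketch (argument sizes are hypothetical): argbuf packs the
 * non-page in args back to back, followed by space for the non-page out
 * args. For a request with in_args[0].size == 16 and in_args[1].size == 32,
 * the device reads bytes [0, 48) and writes its reply starting at offset 48
 * (the argbuf_used value computed in virtio_fs_enqueue_req()).
 */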
722 
723 /* Copy args out of and free req->argbuf */
724 static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
725 {
726 	unsigned int remaining;
727 	unsigned int offset;
728 	unsigned int num_in;
729 	unsigned int num_out;
730 	unsigned int i;
731 
732 	remaining = req->out.h.len - sizeof(req->out.h);
733 	num_in = args->in_numargs - args->in_pages;
734 	num_out = args->out_numargs - args->out_pages;
735 	offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args);
736 
737 	for (i = 0; i < num_out; i++) {
738 		unsigned int argsize = args->out_args[i].size;
739 
740 		if (args->out_argvar &&
741 		    i == args->out_numargs - 1 &&
742 		    argsize > remaining) {
743 			argsize = remaining;
744 		}
745 
746 		memcpy(args->out_args[i].value, req->argbuf + offset, argsize);
747 		offset += argsize;
748 
749 		if (i != args->out_numargs - 1)
750 			remaining -= argsize;
751 	}
752 
753 	/* Store the actual size of the variable-length arg */
754 	if (args->out_argvar)
755 		args->out_args[args->out_numargs - 1].size = remaining;
756 
757 	kfree(req->argbuf);
758 	req->argbuf = NULL;
759 }
760 
761 /* Work function for request completion */
762 static void virtio_fs_request_complete(struct fuse_req *req,
763 				       struct virtio_fs_vq *fsvq)
764 {
765 	struct fuse_pqueue *fpq = &fsvq->fud->pq;
766 	struct fuse_args *args;
767 	struct fuse_args_pages *ap;
768 	unsigned int len, i, thislen;
769 	struct folio *folio;
770 
771 	/*
772 	 * TODO verify that server properly follows FUSE protocol
773 	 * (oh.uniq, oh.len)
774 	 */
775 	args = req->args;
776 	copy_args_from_argbuf(args, req);
777 
778 	if (args->out_pages && args->page_zeroing) {
779 		len = args->out_args[args->out_numargs - 1].size;
780 		ap = container_of(args, typeof(*ap), args);
781 		for (i = 0; i < ap->num_folios; i++) {
782 			thislen = ap->descs[i].length;
783 			if (len < thislen) {
784 				WARN_ON(ap->descs[i].offset);
785 				folio = ap->folios[i];
786 				folio_zero_segment(folio, len, thislen);
787 				len = 0;
788 			} else {
789 				len -= thislen;
790 			}
791 		}
792 	}
793 
794 	spin_lock(&fpq->lock);
795 	clear_bit(FR_SENT, &req->flags);
796 	spin_unlock(&fpq->lock);
797 
798 	fuse_request_end(req);
799 	spin_lock(&fsvq->lock);
800 	dec_in_flight_req(fsvq);
801 	spin_unlock(&fsvq->lock);
802 }
803 
804 static void virtio_fs_complete_req_work(struct work_struct *work)
805 {
806 	struct virtio_fs_req_work *w =
807 		container_of(work, typeof(*w), done_work);
808 
809 	virtio_fs_request_complete(w->req, w->fsvq);
810 	kfree(w);
811 }
812 
813 static void virtio_fs_requests_done_work(struct work_struct *work)
814 {
815 	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
816 						 done_work);
817 	struct fuse_pqueue *fpq = &fsvq->fud->pq;
818 	struct virtqueue *vq = fsvq->vq;
819 	struct fuse_req *req;
820 	struct fuse_req *next;
821 	unsigned int len;
822 	LIST_HEAD(reqs);
823 
824 	/* Collect completed requests off the virtqueue */
825 	spin_lock(&fsvq->lock);
826 	do {
827 		virtqueue_disable_cb(vq);
828 
829 		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
830 			spin_lock(&fpq->lock);
831 			list_move_tail(&req->list, &reqs);
832 			spin_unlock(&fpq->lock);
833 		}
834 	} while (!virtqueue_enable_cb(vq));
835 	spin_unlock(&fsvq->lock);
836 
837 	/* End requests */
838 	list_for_each_entry_safe(req, next, &reqs, list) {
839 		list_del_init(&req->list);
840 
841 		/* blocking async request completes in a worker context */
842 		if (req->args->may_block) {
843 			struct virtio_fs_req_work *w;
844 
845 			w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
846 			INIT_WORK(&w->done_work, virtio_fs_complete_req_work);
847 			w->fsvq = fsvq;
848 			w->req = req;
849 			schedule_work(&w->done_work);
850 		} else {
851 			virtio_fs_request_complete(req, fsvq);
852 		}
853 	}
854 
855 	/* Try to push previously queued requests, as the queue might no longer be full */
856 	spin_lock(&fsvq->lock);
857 	if (!list_empty(&fsvq->queued_reqs))
858 		schedule_work(&fsvq->dispatch_work);
859 	spin_unlock(&fsvq->lock);
860 }
861 
862 static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs)
863 {
864 	const struct cpumask *mask, *masks;
865 	unsigned int q, cpu;
866 
867 	/* First attempt to map using existing transport layer affinities
868 	 * e.g. PCIe MSI-X
869 	 */
870 	if (!vdev->config->get_vq_affinity)
871 		goto fallback;
872 
873 	for (q = 0; q < fs->num_request_queues; q++) {
874 		mask = vdev->config->get_vq_affinity(vdev, VQ_REQUEST + q);
875 		if (!mask)
876 			goto fallback;
877 
878 		for_each_cpu(cpu, mask)
879 			fs->mq_map[cpu] = q + VQ_REQUEST;
880 	}
881 
882 	return;
883 fallback:
884 	/* Attempt to map evenly in groups over the CPUs */
885 	masks = group_cpus_evenly(fs->num_request_queues);
886 	/* If even this fails, fall back to all CPUs using the first request queue */
887 	if (!masks) {
888 		for_each_possible_cpu(cpu)
889 			fs->mq_map[cpu] = VQ_REQUEST;
890 		return;
891 	}
892 
893 	for (q = 0; q < fs->num_request_queues; q++) {
894 		for_each_cpu(cpu, &masks[q])
895 			fs->mq_map[cpu] = q + VQ_REQUEST;
896 	}
897 	kfree(masks);
898 }
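
/*
 * Worked example (made-up topology): with nr_cpu_ids == 4 and
 * num_request_queues == 2, group_cpus_evenly() might yield the groups
 * {0,1} and {2,3}, producing mq_map = {1, 1, 2, 2} since request qids
 * start at VQ_REQUEST. virtio_fs_send_req() then submits on
 * fs->vqs[mq_map[raw_smp_processor_id()]].
 */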
899 
900 /* Virtqueue interrupt handler */
901 static void virtio_fs_vq_done(struct virtqueue *vq)
902 {
903 	struct virtio_fs_vq *fsvq = vq_to_fsvq(vq);
904 
905 	dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name);
906 
907 	schedule_work(&fsvq->done_work);
908 }
909 
910 static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
911 			      int vq_type)
912 {
913 	strscpy(fsvq->name, name, VQ_NAME_LEN);
914 	spin_lock_init(&fsvq->lock);
915 	INIT_LIST_HEAD(&fsvq->queued_reqs);
916 	INIT_LIST_HEAD(&fsvq->end_reqs);
917 	init_completion(&fsvq->in_flight_zero);
918 
919 	if (vq_type == VQ_REQUEST) {
920 		INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work);
921 		INIT_WORK(&fsvq->dispatch_work,
922 				virtio_fs_request_dispatch_work);
923 	} else {
924 		INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work);
925 		INIT_WORK(&fsvq->dispatch_work,
926 				virtio_fs_hiprio_dispatch_work);
927 	}
928 }
929 
930 /* Initialize virtqueues */
931 static int virtio_fs_setup_vqs(struct virtio_device *vdev,
932 			       struct virtio_fs *fs)
933 {
934 	struct virtqueue_info *vqs_info;
935 	struct virtqueue **vqs;
936 	/* Specify pre_vectors to ensure that the queues before the
937 	 * request queues (e.g. hiprio) don't claim any of the CPUs in
938 	 * the multi-queue mapping and interrupt affinities
939 	 */
940 	struct irq_affinity desc = { .pre_vectors = VQ_REQUEST };
941 	unsigned int i;
942 	int ret = 0;
943 
944 	virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
945 			&fs->num_request_queues);
946 	if (fs->num_request_queues == 0)
947 		return -EINVAL;
948 
949 	/* Truncate nr of request queues to nr_cpu_ids */
950 	fs->num_request_queues = min_t(unsigned int, fs->num_request_queues,
951 					nr_cpu_ids);
952 	fs->nvqs = VQ_REQUEST + fs->num_request_queues;
953 	fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
954 	if (!fs->vqs)
955 		return -ENOMEM;
956 
957 	vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
958 	fs->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*fs->mq_map), GFP_KERNEL,
959 					dev_to_node(&vdev->dev));
960 	vqs_info = kcalloc(fs->nvqs, sizeof(*vqs_info), GFP_KERNEL);
961 	if (!vqs || !vqs_info || !fs->mq_map) {
962 		ret = -ENOMEM;
963 		goto out;
964 	}
965 
966 	/* Initialize the hiprio/forget request virtqueue */
967 	vqs_info[VQ_HIPRIO].callback = virtio_fs_vq_done;
968 	virtio_fs_init_vq(&fs->vqs[VQ_HIPRIO], "hiprio", VQ_HIPRIO);
969 	vqs_info[VQ_HIPRIO].name = fs->vqs[VQ_HIPRIO].name;
970 
971 	/* Initialize the request virtqueues */
972 	for (i = VQ_REQUEST; i < fs->nvqs; i++) {
973 		char vq_name[VQ_NAME_LEN];
974 
975 		snprintf(vq_name, VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST);
976 		virtio_fs_init_vq(&fs->vqs[i], vq_name, VQ_REQUEST);
977 		vqs_info[i].callback = virtio_fs_vq_done;
978 		vqs_info[i].name = fs->vqs[i].name;
979 	}
980 
981 	ret = virtio_find_vqs(vdev, fs->nvqs, vqs, vqs_info, &desc);
982 	if (ret < 0)
983 		goto out;
984 
985 	for (i = 0; i < fs->nvqs; i++)
986 		fs->vqs[i].vq = vqs[i];
987 
988 	virtio_fs_start_all_queues(fs);
989 out:
990 	kfree(vqs_info);
991 	kfree(vqs);
992 	if (ret) {
993 		kfree(fs->vqs);
994 		kfree(fs->mq_map);
995 	}
996 	return ret;
997 }
998 
999 /* Free virtqueues (device must already be reset) */
1000 static void virtio_fs_cleanup_vqs(struct virtio_device *vdev)
1001 {
1002 	vdev->config->del_vqs(vdev);
1003 }
1004 
1005 /* Map a window offset to a page frame number.  The window offset will have
1006  * been produced by .iomap_begin(), which maps a file offset to a window
1007  * offset.
1008  */
1009 static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
1010 				    long nr_pages, enum dax_access_mode mode,
1011 				    void **kaddr, pfn_t *pfn)
1012 {
1013 	struct virtio_fs *fs = dax_get_private(dax_dev);
1014 	phys_addr_t offset = PFN_PHYS(pgoff);
1015 	size_t max_nr_pages = fs->window_len / PAGE_SIZE - pgoff;
1016 
1017 	if (kaddr)
1018 		*kaddr = fs->window_kaddr + offset;
1019 	if (pfn)
1020 		*pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
1021 					PFN_DEV | PFN_MAP);
1022 	return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
1023 }
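
/*
 * Example (assuming 4 KiB pages): pgoff == 2 yields offset == 8192, so
 * *kaddr is window_kaddr + 8192 and *pfn covers window_phys_addr + 8192.
 * The return value is clamped to max_nr_pages so callers can never map
 * past the end of the DAX window.
 */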
1024 
1025 static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
1026 				     pgoff_t pgoff, size_t nr_pages)
1027 {
1028 	long rc;
1029 	void *kaddr;
1030 
1031 	rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS, &kaddr,
1032 			       NULL);
1033 	if (rc < 0)
1034 		return dax_mem2blk_err(rc);
1035 
1036 	memset(kaddr, 0, nr_pages << PAGE_SHIFT);
1037 	dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
1038 	return 0;
1039 }
1040 
1041 static const struct dax_operations virtio_fs_dax_ops = {
1042 	.direct_access = virtio_fs_direct_access,
1043 	.zero_page_range = virtio_fs_zero_page_range,
1044 };
1045 
1046 static void virtio_fs_cleanup_dax(void *data)
1047 {
1048 	struct dax_device *dax_dev = data;
1049 
1050 	kill_dax(dax_dev);
1051 	put_dax(dax_dev);
1052 }
1053 
1054 DEFINE_FREE(cleanup_dax, struct dax_dev *, if (!IS_ERR_OR_NULL(_T)) virtio_fs_cleanup_dax(_T))
1055 
1056 static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
1057 {
1058 	struct dax_device *dax_dev __free(cleanup_dax) = NULL;
1059 	struct virtio_shm_region cache_reg;
1060 	struct dev_pagemap *pgmap;
1061 	bool have_cache;
1062 
1063 	if (!IS_ENABLED(CONFIG_FUSE_DAX))
1064 		return 0;
1065 
1066 	dax_dev = alloc_dax(fs, &virtio_fs_dax_ops);
1067 	if (IS_ERR(dax_dev)) {
1068 		int rc = PTR_ERR(dax_dev);
1069 		return rc == -EOPNOTSUPP ? 0 : rc;
1070 	}
1071 
1072 	/* Get cache region */
1073 	have_cache = virtio_get_shm_region(vdev, &cache_reg,
1074 					   (u8)VIRTIO_FS_SHMCAP_ID_CACHE);
1075 	if (!have_cache) {
1076 		dev_notice(&vdev->dev, "%s: No cache capability\n", __func__);
1077 		return 0;
1078 	}
1079 
1080 	if (!devm_request_mem_region(&vdev->dev, cache_reg.addr, cache_reg.len,
1081 				     dev_name(&vdev->dev))) {
1082 		dev_warn(&vdev->dev, "could not reserve region addr=0x%llx len=0x%llx\n",
1083 			 cache_reg.addr, cache_reg.len);
1084 		return -EBUSY;
1085 	}
1086 
1087 	dev_notice(&vdev->dev, "Cache len: 0x%llx @ 0x%llx\n", cache_reg.len,
1088 		   cache_reg.addr);
1089 
1090 	pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);
1091 	if (!pgmap)
1092 		return -ENOMEM;
1093 
1094 	pgmap->type = MEMORY_DEVICE_FS_DAX;
1095 
1096 	/* Ideally we would directly use the PCI BAR resource but
1097 	 * devm_memremap_pages() wants its own copy in pgmap.  So
1098 	 * initialize a struct range from scratch (only the start
1099 	 * and end fields will be used).
1100 	 */
1101 	pgmap->range = (struct range) {
1102 		.start = (phys_addr_t) cache_reg.addr,
1103 		.end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1,
1104 	};
1105 	pgmap->nr_range = 1;
1106 
1107 	fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);
1108 	if (IS_ERR(fs->window_kaddr))
1109 		return PTR_ERR(fs->window_kaddr);
1110 
1111 	fs->window_phys_addr = (phys_addr_t) cache_reg.addr;
1112 	fs->window_len = (phys_addr_t) cache_reg.len;
1113 
1114 	dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n",
1115 		__func__, fs->window_kaddr, cache_reg.addr, cache_reg.len);
1116 
1117 	fs->dax_dev = no_free_ptr(dax_dev);
1118 	return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax,
1119 					fs->dax_dev);
1120 }
1121 
1122 static int virtio_fs_probe(struct virtio_device *vdev)
1123 {
1124 	struct virtio_fs *fs;
1125 	int ret;
1126 
1127 	fs = kzalloc(sizeof(*fs), GFP_KERNEL);
1128 	if (!fs)
1129 		return -ENOMEM;
1130 	kobject_init(&fs->kobj, &virtio_fs_ktype);
1131 	vdev->priv = fs;
1132 
1133 	ret = virtio_fs_read_tag(vdev, fs);
1134 	if (ret < 0)
1135 		goto out;
1136 
1137 	ret = virtio_fs_setup_vqs(vdev, fs);
1138 	if (ret < 0)
1139 		goto out;
1140 
1141 	virtio_fs_map_queues(vdev, fs);
1142 
1143 	ret = virtio_fs_setup_dax(vdev, fs);
1144 	if (ret < 0)
1145 		goto out_vqs;
1146 
1147 	/* Bring the device online in case the filesystem is mounted and
1148 	 * requests need to be sent before we return.
1149 	 */
1150 	virtio_device_ready(vdev);
1151 
1152 	ret = virtio_fs_add_instance(vdev, fs);
1153 	if (ret < 0)
1154 		goto out_vqs;
1155 
1156 	return 0;
1157 
1158 out_vqs:
1159 	virtio_reset_device(vdev);
1160 	virtio_fs_cleanup_vqs(vdev);
1161 
1162 out:
1163 	vdev->priv = NULL;
1164 	kobject_put(&fs->kobj);
1165 	return ret;
1166 }
1167 
1168 static void virtio_fs_stop_all_queues(struct virtio_fs *fs)
1169 {
1170 	struct virtio_fs_vq *fsvq;
1171 	int i;
1172 
1173 	for (i = 0; i < fs->nvqs; i++) {
1174 		fsvq = &fs->vqs[i];
1175 		spin_lock(&fsvq->lock);
1176 		fsvq->connected = false;
1177 		spin_unlock(&fsvq->lock);
1178 	}
1179 }
1180 
1181 static void virtio_fs_remove(struct virtio_device *vdev)
1182 {
1183 	struct virtio_fs *fs = vdev->priv;
1184 
1185 	mutex_lock(&virtio_fs_mutex);
1186 	/* This device is going away. No one should get a new reference */
1187 	list_del_init(&fs->list);
1188 	virtio_fs_delete_queues_sysfs(fs);
1189 	sysfs_remove_link(&fs->kobj, "device");
1190 	kobject_put(fs->mqs_kobj);
1191 	kobject_del(&fs->kobj);
1192 	virtio_fs_stop_all_queues(fs);
1193 	virtio_fs_drain_all_queues_locked(fs);
1194 	virtio_reset_device(vdev);
1195 	virtio_fs_cleanup_vqs(vdev);
1196 
1197 	vdev->priv = NULL;
1198 	/* Put device reference on virtio_fs object */
1199 	virtio_fs_put_locked(fs);
1200 	mutex_unlock(&virtio_fs_mutex);
1201 }
1202 
1203 #ifdef CONFIG_PM_SLEEP
1204 static int virtio_fs_freeze(struct virtio_device *vdev)
1205 {
1206 	/* TODO need to save state here */
1207 	pr_warn("virtio-fs: suspend/resume not yet supported\n");
1208 	return -EOPNOTSUPP;
1209 }
1210 
1211 static int virtio_fs_restore(struct virtio_device *vdev)
1212 {
1213 	 /* TODO need to restore state here */
1214 	return 0;
1215 }
1216 #endif /* CONFIG_PM_SLEEP */
1217 
1218 static const struct virtio_device_id id_table[] = {
1219 	{ VIRTIO_ID_FS, VIRTIO_DEV_ANY_ID },
1220 	{},
1221 };
1222 
1223 static const unsigned int feature_table[] = {};
1224 
1225 static struct virtio_driver virtio_fs_driver = {
1226 	.driver.name		= KBUILD_MODNAME,
1227 	.id_table		= id_table,
1228 	.feature_table		= feature_table,
1229 	.feature_table_size	= ARRAY_SIZE(feature_table),
1230 	.probe			= virtio_fs_probe,
1231 	.remove			= virtio_fs_remove,
1232 #ifdef CONFIG_PM_SLEEP
1233 	.freeze			= virtio_fs_freeze,
1234 	.restore		= virtio_fs_restore,
1235 #endif
1236 };
1237 
1238 static void virtio_fs_send_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *link)
1239 {
1240 	struct virtio_fs_forget *forget;
1241 	struct virtio_fs_forget_req *req;
1242 	struct virtio_fs *fs = fiq->priv;
1243 	struct virtio_fs_vq *fsvq = &fs->vqs[VQ_HIPRIO];
1244 	u64 unique = fuse_get_unique(fiq);
1245 
1246 	/* Allocate a buffer for the request */
1247 	forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
1248 	req = &forget->req;
1249 
1250 	req->ih = (struct fuse_in_header){
1251 		.opcode = FUSE_FORGET,
1252 		.nodeid = link->forget_one.nodeid,
1253 		.unique = unique,
1254 		.len = sizeof(*req),
1255 	};
1256 	req->arg = (struct fuse_forget_in){
1257 		.nlookup = link->forget_one.nlookup,
1258 	};
1259 
1260 	send_forget_request(fsvq, forget, false);
1261 	kfree(link);
1262 }
1263 
1264 static void virtio_fs_send_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
1265 {
1266 	/*
1267 	 * TODO interrupts.
1268 	 *
1269 	 * Normal fs operations on local filesystems aren't interruptible.
1270 	 * Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
1271 	 * with shared lock between host and guest.
1272 	 */
1273 }
1274 
1275 /* Count number of scatter-gather elements required */
1276 static unsigned int sg_count_fuse_folios(struct fuse_folio_desc *folio_descs,
1277 					 unsigned int num_folios,
1278 					 unsigned int total_len)
1279 {
1280 	unsigned int i;
1281 	unsigned int this_len;
1282 
1283 	for (i = 0; i < num_folios && total_len; i++) {
1284 		this_len = min(folio_descs[i].length, total_len);
1285 		total_len -= this_len;
1286 	}
1287 
1288 	return i;
1289 }
1290 
1291 /* Return the number of scatter-gather list elements required */
1292 static unsigned int sg_count_fuse_req(struct fuse_req *req)
1293 {
1294 	struct fuse_args *args = req->args;
1295 	struct fuse_args_pages *ap = container_of(args, typeof(*ap), args);
1296 	unsigned int size, total_sgs = 1 /* fuse_in_header */;
1297 
1298 	if (args->in_numargs - args->in_pages)
1299 		total_sgs += 1;
1300 
1301 	if (args->in_pages) {
1302 		size = args->in_args[args->in_numargs - 1].size;
1303 		total_sgs += sg_count_fuse_folios(ap->descs, ap->num_folios,
1304 						  size);
1305 	}
1306 
1307 	if (!test_bit(FR_ISREPLY, &req->flags))
1308 		return total_sgs;
1309 
1310 	total_sgs += 1 /* fuse_out_header */;
1311 
1312 	if (args->out_numargs - args->out_pages)
1313 		total_sgs += 1;
1314 
1315 	if (args->out_pages) {
1316 		size = args->out_args[args->out_numargs - 1].size;
1317 		total_sgs += sg_count_fuse_folios(ap->descs, ap->num_folios,
1318 						  size);
1319 	}
1320 
1321 	return total_sgs;
1322 }
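
/*
 * Worked count (hypothetical WRITE carrying 8 data folios): fuse_in_header
 * (1) + packed in args (1) + data folios (8) + fuse_out_header (1) +
 * packed out args (1) = 12 elements. A request with FR_ISREPLY clear, such
 * as a FORGET, stops after the device-readable part.
 */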
1323 
1324 /* Add folios to scatter-gather list and return number of elements used */
1325 static unsigned int sg_init_fuse_folios(struct scatterlist *sg,
1326 					struct folio **folios,
1327 					struct fuse_folio_desc *folio_descs,
1328 					unsigned int num_folios,
1329 				        unsigned int total_len)
1330 {
1331 	unsigned int i;
1332 	unsigned int this_len;
1333 
1334 	for (i = 0; i < num_folios && total_len; i++) {
1335 		sg_init_table(&sg[i], 1);
1336 		this_len = min(folio_descs[i].length, total_len);
1337 		sg_set_folio(&sg[i], folios[i], this_len, folio_descs[i].offset);
1338 		total_len -= this_len;
1339 	}
1340 
1341 	return i;
1342 }
1343 
1344 /* Add args to scatter-gather list and return number of elements used */
1345 static unsigned int sg_init_fuse_args(struct scatterlist *sg,
1346 				      struct fuse_req *req,
1347 				      struct fuse_arg *args,
1348 				      unsigned int numargs,
1349 				      bool argpages,
1350 				      void *argbuf,
1351 				      unsigned int *len_used)
1352 {
1353 	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
1354 	unsigned int total_sgs = 0;
1355 	unsigned int len;
1356 
1357 	len = fuse_len_args(numargs - argpages, args);
1358 	if (len)
1359 		sg_init_one(&sg[total_sgs++], argbuf, len);
1360 
1361 	if (argpages)
1362 		total_sgs += sg_init_fuse_folios(&sg[total_sgs],
1363 						 ap->folios, ap->descs,
1364 						 ap->num_folios,
1365 						 args[numargs - 1].size);
1366 
1367 	if (len_used)
1368 		*len_used = len;
1369 
1370 	return total_sgs;
1371 }
1372 
1373 /* Add a request to a virtqueue and kick the device */
1374 static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
1375 				 struct fuse_req *req, bool in_flight,
1376 				 gfp_t gfp)
1377 {
1378 	/* requests need at least 4 elements */
1379 	struct scatterlist *stack_sgs[6];
1380 	struct scatterlist stack_sg[ARRAY_SIZE(stack_sgs)];
1381 	struct scatterlist **sgs = stack_sgs;
1382 	struct scatterlist *sg = stack_sg;
1383 	struct virtqueue *vq;
1384 	struct fuse_args *args = req->args;
1385 	unsigned int argbuf_used = 0;
1386 	unsigned int out_sgs = 0;
1387 	unsigned int in_sgs = 0;
1388 	unsigned int total_sgs;
1389 	unsigned int i;
1390 	int ret;
1391 	bool notify;
1392 	struct fuse_pqueue *fpq;
1393 
1394 	/* Does the sglist fit on the stack? */
1395 	total_sgs = sg_count_fuse_req(req);
1396 	if (total_sgs > ARRAY_SIZE(stack_sgs)) {
1397 		sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), gfp);
1398 		sg = kmalloc_array(total_sgs, sizeof(sg[0]), gfp);
1399 		if (!sgs || !sg) {
1400 			ret = -ENOMEM;
1401 			goto out;
1402 		}
1403 	}
1404 
1405 	/* Use a bounce buffer since stack args cannot be mapped */
1406 	ret = copy_args_to_argbuf(req, gfp);
1407 	if (ret < 0)
1408 		goto out;
1409 
1410 	/* Request elements */
1411 	sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h));
1412 	out_sgs += sg_init_fuse_args(&sg[out_sgs], req,
1413 				     (struct fuse_arg *)args->in_args,
1414 				     args->in_numargs, args->in_pages,
1415 				     req->argbuf, &argbuf_used);
1416 
1417 	/* Reply elements */
1418 	if (test_bit(FR_ISREPLY, &req->flags)) {
1419 		sg_init_one(&sg[out_sgs + in_sgs++],
1420 			    &req->out.h, sizeof(req->out.h));
1421 		in_sgs += sg_init_fuse_args(&sg[out_sgs + in_sgs], req,
1422 					    args->out_args, args->out_numargs,
1423 					    args->out_pages,
1424 					    req->argbuf + argbuf_used, NULL);
1425 	}
1426 
1427 	WARN_ON(out_sgs + in_sgs != total_sgs);
1428 
1429 	for (i = 0; i < total_sgs; i++)
1430 		sgs[i] = &sg[i];
1431 
1432 	spin_lock(&fsvq->lock);
1433 
1434 	if (!fsvq->connected) {
1435 		spin_unlock(&fsvq->lock);
1436 		ret = -ENOTCONN;
1437 		goto out;
1438 	}
1439 
1440 	vq = fsvq->vq;
1441 	ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
1442 	if (ret < 0) {
1443 		spin_unlock(&fsvq->lock);
1444 		goto out;
1445 	}
1446 
1447 	/* Request successfully sent. */
1448 	fpq = &fsvq->fud->pq;
1449 	spin_lock(&fpq->lock);
1450 	list_add_tail(&req->list, fpq->processing);
1451 	spin_unlock(&fpq->lock);
1452 	set_bit(FR_SENT, &req->flags);
1453 	/* matches barrier in request_wait_answer() */
1454 	smp_mb__after_atomic();
1455 
1456 	if (!in_flight)
1457 		inc_in_flight_req(fsvq);
1458 	notify = virtqueue_kick_prepare(vq);
1459 
1460 	spin_unlock(&fsvq->lock);
1461 
1462 	if (notify)
1463 		virtqueue_notify(vq);
1464 
1465 out:
1466 	if (ret < 0 && req->argbuf) {
1467 		kfree(req->argbuf);
1468 		req->argbuf = NULL;
1469 	}
1470 	if (sgs != stack_sgs) {
1471 		kfree(sgs);
1472 		kfree(sg);
1473 	}
1474 
1475 	return ret;
1476 }
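
/*
 * Descriptor-layout sketch matching the order built above (element counts
 * depend on the request type and folio list):
 *
 *   out (device-readable): [fuse_in_header][packed in args][in folios...]
 *   in  (device-writable): [fuse_out_header][packed out args, if any][out folios...]
 *
 * For a READ the data folios sit in the device-writable half via
 * args->out_pages; for a WRITE they are device-readable via args->in_pages.
 */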
1477 
1478 static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req)
1479 {
1480 	unsigned int queue_id;
1481 	struct virtio_fs *fs;
1482 	struct virtio_fs_vq *fsvq;
1483 	int ret;
1484 
1485 	if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
1486 		req->in.h.unique = fuse_get_unique(fiq);
1487 
1488 	clear_bit(FR_PENDING, &req->flags);
1489 
1490 	fs = fiq->priv;
1491 	queue_id = fs->mq_map[raw_smp_processor_id()];
1492 
1493 	pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u queue_id %u\n",
1494 		 __func__, req->in.h.opcode, req->in.h.unique,
1495 		 req->in.h.nodeid, req->in.h.len,
1496 		 fuse_len_args(req->args->out_numargs, req->args->out_args),
1497 		 queue_id);
1498 
1499 	fsvq = &fs->vqs[queue_id];
1500 	ret = virtio_fs_enqueue_req(fsvq, req, false, GFP_ATOMIC);
1501 	if (ret < 0) {
1502 		if (ret == -ENOSPC) {
1503 			/*
1504 			 * Virtqueue full. Retry submission from worker
1505 			 * context as we might be holding fc->bg_lock.
1506 			 */
1507 			spin_lock(&fsvq->lock);
1508 			list_add_tail(&req->list, &fsvq->queued_reqs);
1509 			inc_in_flight_req(fsvq);
1510 			spin_unlock(&fsvq->lock);
1511 			return;
1512 		}
1513 		req->out.h.error = ret;
1514 		pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);
1515 
1516 		/* Can't end request in submission context. Use a worker */
1517 		spin_lock(&fsvq->lock);
1518 		list_add_tail(&req->list, &fsvq->end_reqs);
1519 		schedule_work(&fsvq->dispatch_work);
1520 		spin_unlock(&fsvq->lock);
1521 		return;
1522 	}
1523 }
1524 
1525 static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
1526 	.send_forget	= virtio_fs_send_forget,
1527 	.send_interrupt	= virtio_fs_send_interrupt,
1528 	.send_req	= virtio_fs_send_req,
1529 	.release	= virtio_fs_fiq_release,
1530 };
1531 
1532 static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
1533 {
1534 	ctx->rootmode = S_IFDIR;
1535 	ctx->default_permissions = 1;
1536 	ctx->allow_other = 1;
1537 	ctx->max_read = UINT_MAX;
1538 	ctx->blksize = 512;
1539 	ctx->destroy = true;
1540 	ctx->no_control = true;
1541 	ctx->no_force_umount = true;
1542 }
1543 
1544 static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
1545 {
1546 	struct fuse_mount *fm = get_fuse_mount_super(sb);
1547 	struct fuse_conn *fc = fm->fc;
1548 	struct virtio_fs *fs = fc->iq.priv;
1549 	struct fuse_fs_context *ctx = fsc->fs_private;
1550 	unsigned int i;
1551 	int err;
1552 
1553 	virtio_fs_ctx_set_defaults(ctx);
1554 	mutex_lock(&virtio_fs_mutex);
1555 
1556 	/* After holding mutex, make sure virtiofs device is still there.
1557 	 * Though we are holding a reference to it, driver ->remove might
1558 	 * still have cleaned up virtual queues. In that case bail out.
1559 	 */
1560 	err = -EINVAL;
1561 	if (list_empty(&fs->list)) {
1562 		pr_info("virtio-fs: tag <%s> not found\n", fs->tag);
1563 		goto err;
1564 	}
1565 
1566 	err = -ENOMEM;
1567 	/* Allocate a fuse_dev for each virtqueue (hiprio and request queues) */
1568 	for (i = 0; i < fs->nvqs; i++) {
1569 		struct virtio_fs_vq *fsvq = &fs->vqs[i];
1570 
1571 		fsvq->fud = fuse_dev_alloc();
1572 		if (!fsvq->fud)
1573 			goto err_free_fuse_devs;
1574 	}
1575 
1576 	/* virtiofs allocates and installs its own fuse devices */
1577 	ctx->fudptr = NULL;
1578 	if (ctx->dax_mode != FUSE_DAX_NEVER) {
1579 		if (ctx->dax_mode == FUSE_DAX_ALWAYS && !fs->dax_dev) {
1580 			err = -EINVAL;
1581 			pr_err("virtio-fs: dax can't be enabled as filesystem"
1582 			       " device does not support it.\n");
1583 			goto err_free_fuse_devs;
1584 		}
1585 		ctx->dax_dev = fs->dax_dev;
1586 	}
1587 	err = fuse_fill_super_common(sb, ctx);
1588 	if (err < 0)
1589 		goto err_free_fuse_devs;
1590 
1591 	for (i = 0; i < fs->nvqs; i++) {
1592 		struct virtio_fs_vq *fsvq = &fs->vqs[i];
1593 
1594 		fuse_dev_install(fsvq->fud, fc);
1595 	}
1596 
1597 	/* Previous unmount will stop all queues. Start these again */
1598 	virtio_fs_start_all_queues(fs);
1599 	fuse_send_init(fm);
1600 	mutex_unlock(&virtio_fs_mutex);
1601 	return 0;
1602 
1603 err_free_fuse_devs:
1604 	virtio_fs_free_devs(fs);
1605 err:
1606 	mutex_unlock(&virtio_fs_mutex);
1607 	return err;
1608 }
1609 
1610 static void virtio_fs_conn_destroy(struct fuse_mount *fm)
1611 {
1612 	struct fuse_conn *fc = fm->fc;
1613 	struct virtio_fs *vfs = fc->iq.priv;
1614 	struct virtio_fs_vq *fsvq = &vfs->vqs[VQ_HIPRIO];
1615 
1616 	/* Stop dax worker. Soon evict_inodes() will be called which
1617 	 * will free all memory ranges belonging to all inodes.
1618 	 */
1619 	if (IS_ENABLED(CONFIG_FUSE_DAX))
1620 		fuse_dax_cancel_work(fc);
1621 
1622 	/* Stop forget queue. Soon destroy will be sent */
1623 	spin_lock(&fsvq->lock);
1624 	fsvq->connected = false;
1625 	spin_unlock(&fsvq->lock);
1626 	virtio_fs_drain_all_queues(vfs);
1627 
1628 	fuse_conn_destroy(fm);
1629 
1630 	/* fuse_conn_destroy() must have sent destroy. Stop all queues
1631 	 * and drain one more time and free fuse devices. Freeing fuse
1632 	 * devices will drop their reference on fuse_conn and that in
1633 	 * turn will drop its reference on virtio_fs object.
1634 	 */
1635 	virtio_fs_stop_all_queues(vfs);
1636 	virtio_fs_drain_all_queues(vfs);
1637 	virtio_fs_free_devs(vfs);
1638 }
1639 
1640 static void virtio_kill_sb(struct super_block *sb)
1641 {
1642 	struct fuse_mount *fm = get_fuse_mount_super(sb);
1643 	bool last;
1644 
1645 	/* If mount failed, we can still be called without any fc */
1646 	if (sb->s_root) {
1647 		last = fuse_mount_remove(fm);
1648 		if (last)
1649 			virtio_fs_conn_destroy(fm);
1650 	}
1651 	kill_anon_super(sb);
1652 	fuse_mount_destroy(fm);
1653 }
1654 
1655 static int virtio_fs_test_super(struct super_block *sb,
1656 				struct fs_context *fsc)
1657 {
1658 	struct fuse_mount *fsc_fm = fsc->s_fs_info;
1659 	struct fuse_mount *sb_fm = get_fuse_mount_super(sb);
1660 
1661 	return fsc_fm->fc->iq.priv == sb_fm->fc->iq.priv;
1662 }
1663 
1664 static int virtio_fs_get_tree(struct fs_context *fsc)
1665 {
1666 	struct virtio_fs *fs;
1667 	struct super_block *sb;
1668 	struct fuse_conn *fc = NULL;
1669 	struct fuse_mount *fm;
1670 	unsigned int virtqueue_size;
1671 	int err = -EIO;
1672 
1673 	/* This gets a reference on virtio_fs object. This ptr gets installed
1674 	 * in fc->iq.priv. Once fuse_conn is going away, it calls ->put()
1675 	 * to drop the reference to this object.
1676 	 */
1677 	fs = virtio_fs_find_instance(fsc->source);
1678 	if (!fs) {
1679 		pr_info("virtio-fs: tag <%s> not found\n", fsc->source);
1680 		return -EINVAL;
1681 	}
1682 
1683 	virtqueue_size = virtqueue_get_vring_size(fs->vqs[VQ_REQUEST].vq);
1684 	if (WARN_ON(virtqueue_size <= FUSE_HEADER_OVERHEAD))
1685 		goto out_err;
1686 
1687 	err = -ENOMEM;
1688 	fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL);
1689 	if (!fc)
1690 		goto out_err;
1691 
1692 	fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
1693 	if (!fm)
1694 		goto out_err;
1695 
1696 	fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
1697 	fc->release = fuse_free_conn;
1698 	fc->delete_stale = true;
1699 	fc->auto_submounts = true;
1700 	fc->sync_fs = true;
1701 	fc->use_pages_for_kvec_io = true;
1702 
1703 	/* Tell FUSE to split requests that exceed the virtqueue's size */
1704 	fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit,
1705 				    virtqueue_size - FUSE_HEADER_OVERHEAD);
1706 
1707 	fsc->s_fs_info = fm;
1708 	sb = sget_fc(fsc, virtio_fs_test_super, set_anon_super_fc);
1709 	if (fsc->s_fs_info)
1710 		fuse_mount_destroy(fm);
1711 	if (IS_ERR(sb))
1712 		return PTR_ERR(sb);
1713 
1714 	if (!sb->s_root) {
1715 		err = virtio_fs_fill_super(sb, fsc);
1716 		if (err) {
1717 			deactivate_locked_super(sb);
1718 			return err;
1719 		}
1720 
1721 		sb->s_flags |= SB_ACTIVE;
1722 	}
1723 
1724 	WARN_ON(fsc->root);
1725 	fsc->root = dget(sb->s_root);
1726 	return 0;
1727 
1728 out_err:
1729 	kfree(fc);
1730 	virtio_fs_put(fs);
1731 	return err;
1732 }
1733 
1734 static const struct fs_context_operations virtio_fs_context_ops = {
1735 	.free		= virtio_fs_free_fsc,
1736 	.parse_param	= virtio_fs_parse_param,
1737 	.get_tree	= virtio_fs_get_tree,
1738 };
1739 
1740 static int virtio_fs_init_fs_context(struct fs_context *fsc)
1741 {
1742 	struct fuse_fs_context *ctx;
1743 
1744 	if (fsc->purpose == FS_CONTEXT_FOR_SUBMOUNT)
1745 		return fuse_init_fs_context_submount(fsc);
1746 
1747 	ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
1748 	if (!ctx)
1749 		return -ENOMEM;
1750 	fsc->fs_private = ctx;
1751 	fsc->ops = &virtio_fs_context_ops;
1752 	return 0;
1753 }
1754 
1755 static struct file_system_type virtio_fs_type = {
1756 	.owner		= THIS_MODULE,
1757 	.name		= "virtiofs",
1758 	.init_fs_context = virtio_fs_init_fs_context,
1759 	.kill_sb	= virtio_kill_sb,
1760 	.fs_flags	= FS_ALLOW_IDMAP,
1761 };
1762 
1763 static int virtio_fs_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
1764 {
1765 	const struct virtio_fs *fs = container_of(kobj, struct virtio_fs, kobj);
1766 
1767 	add_uevent_var(env, "TAG=%s", fs->tag);
1768 	return 0;
1769 }
1770 
1771 static const struct kset_uevent_ops virtio_fs_uevent_ops = {
1772 	.uevent = virtio_fs_uevent,
1773 };
1774 
1775 static int __init virtio_fs_sysfs_init(void)
1776 {
1777 	virtio_fs_kset = kset_create_and_add("virtiofs", &virtio_fs_uevent_ops,
1778 					     fs_kobj);
1779 	if (!virtio_fs_kset)
1780 		return -ENOMEM;
1781 	return 0;
1782 }
1783 
1784 static void virtio_fs_sysfs_exit(void)
1785 {
1786 	kset_unregister(virtio_fs_kset);
1787 	virtio_fs_kset = NULL;
1788 }
1789 
1790 static int __init virtio_fs_init(void)
1791 {
1792 	int ret;
1793 
1794 	ret = virtio_fs_sysfs_init();
1795 	if (ret < 0)
1796 		return ret;
1797 
1798 	ret = register_virtio_driver(&virtio_fs_driver);
1799 	if (ret < 0)
1800 		goto sysfs_exit;
1801 
1802 	ret = register_filesystem(&virtio_fs_type);
1803 	if (ret < 0)
1804 		goto unregister_virtio_driver;
1805 
1806 	return 0;
1807 
1808 unregister_virtio_driver:
1809 	unregister_virtio_driver(&virtio_fs_driver);
1810 sysfs_exit:
1811 	virtio_fs_sysfs_exit();
1812 	return ret;
1813 }
1814 module_init(virtio_fs_init);
1815 
1816 static void __exit virtio_fs_exit(void)
1817 {
1818 	unregister_filesystem(&virtio_fs_type);
1819 	unregister_virtio_driver(&virtio_fs_driver);
1820 	virtio_fs_sysfs_exit();
1821 }
1822 module_exit(virtio_fs_exit);
1823 
1824 MODULE_AUTHOR("Stefan Hajnoczi <stefanha@redhat.com>");
1825 MODULE_DESCRIPTION("Virtio Filesystem");
1826 MODULE_LICENSE("GPL");
1827 MODULE_ALIAS_FS(KBUILD_MODNAME);
1828 MODULE_DEVICE_TABLE(virtio, id_table);
1829