// SPDX-License-Identifier: GPL-2.0
/*
 * virtio-fs: Virtio Filesystem
 * Copyright (C) 2018 Red Hat, Inc.
 */

#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/group_cpus.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_fs.h>
#include <linux/delay.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/highmem.h>
#include <linux/cleanup.h>
#include <linux/uio.h>
#include "fuse_i.h"

/* Used to help calculate the FUSE connection's max_pages limit for a request's
 * size. Parts of the struct fuse_req are sliced into scatter-gather lists in
 * addition to the pages used, so this can help account for that overhead.
 */
#define FUSE_HEADER_OVERHEAD    4

/* List of virtio-fs device instances and a lock for the list. Also provides
 * mutual exclusion between the device removal and mount paths.
 */
static DEFINE_MUTEX(virtio_fs_mutex);
static LIST_HEAD(virtio_fs_instances);

/* The /sys/fs/virtio_fs/ kset */
static struct kset *virtio_fs_kset;

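/* Virtqueue indices: VQ_HIPRIO carries FUSE_FORGET requests; the request
 * queues (one or more, see num_request_queues) start at VQ_REQUEST.
 */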
enum {
	VQ_HIPRIO,
	VQ_REQUEST
};

#define VQ_NAME_LEN	24

/* Per-virtqueue state */
struct virtio_fs_vq {
	spinlock_t lock;
	struct virtqueue *vq;     /* protected by ->lock */
	struct work_struct done_work;
	struct list_head queued_reqs;
	struct list_head end_reqs;	/* End these requests */
	struct work_struct dispatch_work;
	struct fuse_dev *fud;
	bool connected;
	long in_flight;
	struct completion in_flight_zero; /* No inflight requests */
	struct kobject *kobj;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

/* A virtio-fs device instance */
struct virtio_fs {
	struct kobject kobj;
	struct kobject *mqs_kobj;
	struct list_head list;    /* on virtio_fs_instances */
	char *tag;
	struct virtio_fs_vq *vqs;
	unsigned int nvqs;               /* number of virtqueues */
	unsigned int num_request_queues; /* number of request queues */
	struct dax_device *dax_dev;

	unsigned int *mq_map; /* index = cpu id, value = request vq id */

	/* DAX memory window where file contents are mapped */
	void *window_kaddr;
	phys_addr_t window_phys_addr;
	size_t window_len;
};

struct virtio_fs_forget_req {
	struct fuse_in_header ih;
	struct fuse_forget_in arg;
};

struct virtio_fs_forget {
	/* This request can be temporarily queued on a virtqueue */
	struct list_head list;
	struct virtio_fs_forget_req req;
};

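/* Deferred completion of a single request; used when args->may_block is set */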
struct virtio_fs_req_work {
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq;
	struct work_struct done_work;
};

static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight,
				 gfp_t gfp);

static const struct constant_table dax_param_enums[] = {
	{"always",	FUSE_DAX_ALWAYS },
	{"never",	FUSE_DAX_NEVER },
	{"inode",	FUSE_DAX_INODE_USER },
	{}
};

enum {
	OPT_DAX,
	OPT_DAX_ENUM,
};

static const struct fs_parameter_spec virtio_fs_parameters[] = {
	fsparam_flag("dax", OPT_DAX),
	fsparam_enum("dax", OPT_DAX_ENUM, dax_param_enums),
	{}
};

static int virtio_fs_parse_param(struct fs_context *fsc,
				 struct fs_parameter *param)
{
	struct fs_parse_result result;
	struct fuse_fs_context *ctx = fsc->fs_private;
	int opt;

	opt = fs_parse(fsc, virtio_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case OPT_DAX:
		ctx->dax_mode = FUSE_DAX_ALWAYS;
		break;
	case OPT_DAX_ENUM:
		ctx->dax_mode = result.uint_32;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void virtio_fs_free_fsc(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;

	kfree(ctx);
}

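/* Map a virtqueue to its per-queue state; fs->vqs[] is indexed by vq->index */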
static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
{
	struct virtio_fs *fs = vq->vdev->priv;

	return &fs->vqs[vq->index];
}

/* Should be called with fsvq->lock held. */
static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
{
	fsvq->in_flight++;
}

/* Should be called with fsvq->lock held. */
static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight <= 0);
	fsvq->in_flight--;
	if (!fsvq->in_flight)
		complete(&fsvq->in_flight_zero);
}

static ssize_t tag_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct virtio_fs *fs = container_of(kobj, struct virtio_fs, kobj);

	return sysfs_emit(buf, "%s\n", fs->tag);
}

static struct kobj_attribute virtio_fs_tag_attr = __ATTR_RO(tag);

static struct attribute *virtio_fs_attrs[] = {
	&virtio_fs_tag_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(virtio_fs);

static void virtio_fs_ktype_release(struct kobject *kobj)
{
	struct virtio_fs *vfs = container_of(kobj, struct virtio_fs, kobj);

	kfree(vfs->mq_map);
	kfree(vfs->vqs);
	kfree(vfs);
}

static const struct kobj_type virtio_fs_ktype = {
	.release = virtio_fs_ktype_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = virtio_fs_groups,
};

static struct virtio_fs_vq *virtio_fs_kobj_to_vq(struct virtio_fs *fs,
		struct kobject *kobj)
{
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		if (kobj == fs->vqs[i].kobj)
			return &fs->vqs[i];
	}
	return NULL;
}

static ssize_t name_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj);
	struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj);

	if (!fsvq)
		return -EINVAL;
	return sysfs_emit(buf, "%s\n", fsvq->name);
}

static struct kobj_attribute virtio_fs_vq_name_attr = __ATTR_RO(name);

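/* List the CPUs mapped to this queue; the hiprio queue serves all CPUs */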
static ssize_t cpu_list_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj);
	struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj);
	unsigned int cpu, qid;
	const size_t size = PAGE_SIZE - 1;
	bool first = true;
	int ret = 0, pos = 0;

	if (!fsvq)
		return -EINVAL;

	qid = fsvq->vq->index;
	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid - VQ_REQUEST)) {
			if (first)
				ret = snprintf(buf + pos, size - pos, "%u", cpu);
			else
				ret = snprintf(buf + pos, size - pos, ", %u", cpu);

			if (ret >= size - pos)
				break;
			first = false;
			pos += ret;
		}
	}
	ret = snprintf(buf + pos, size + 1 - pos, "\n");
	return pos + ret;
}

static struct kobj_attribute virtio_fs_vq_cpu_list_attr = __ATTR_RO(cpu_list);

static struct attribute *virtio_fs_vq_attrs[] = {
	&virtio_fs_vq_name_attr.attr,
	&virtio_fs_vq_cpu_list_attr.attr,
	NULL
};

static struct attribute_group virtio_fs_vq_attr_group = {
	.attrs = virtio_fs_vq_attrs,
};

/* Make sure virtio_fs_mutex is held */
static void virtio_fs_put_locked(struct virtio_fs *fs)
{
	lockdep_assert_held(&virtio_fs_mutex);

	kobject_put(&fs->kobj);
}

static void virtio_fs_put(struct virtio_fs *fs)
{
	mutex_lock(&virtio_fs_mutex);
	virtio_fs_put_locked(fs);
	mutex_unlock(&virtio_fs_mutex);
}

static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
{
	struct virtio_fs *vfs = fiq->priv;

	virtio_fs_put(vfs);
}

static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight < 0);

	/* Wait for in-flight requests to finish. */
	spin_lock(&fsvq->lock);
	if (fsvq->in_flight) {
		/* We are holding virtio_fs_mutex. There should not be any
		 * waiters waiting for completion.
		 */
		reinit_completion(&fsvq->in_flight_zero);
		spin_unlock(&fsvq->lock);
		wait_for_completion(&fsvq->in_flight_zero);
	} else {
		spin_unlock(&fsvq->lock);
	}

	flush_work(&fsvq->done_work);
	flush_work(&fsvq->dispatch_work);
}

static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		virtio_fs_drain_queue(fsvq);
	}
}

static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
{
	/* Provides mutual exclusion between the ->remove and ->kill_sb
	 * paths. We don't want both of these draining the queues at the
	 * same time. The completion logic reinits the completion, so no
	 * other thread may be doing a reinit or already waiting for
	 * completion.
	 */
	mutex_lock(&virtio_fs_mutex);
	virtio_fs_drain_all_queues_locked(fs);
	mutex_unlock(&virtio_fs_mutex);
}

static void virtio_fs_start_all_queues(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		spin_lock(&fsvq->lock);
		fsvq->connected = true;
		spin_unlock(&fsvq->lock);
	}
}

static void virtio_fs_delete_queues_sysfs(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		kobject_put(fsvq->kobj);
	}
}

static int virtio_fs_add_queues_sysfs(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	char buff[12];
	int i, j, ret;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];

		sprintf(buff, "%d", i);
		fsvq->kobj = kobject_create_and_add(buff, fs->mqs_kobj);
		if (!fsvq->kobj) {
			ret = -ENOMEM;
			goto out_del;
		}

		ret = sysfs_create_group(fsvq->kobj, &virtio_fs_vq_attr_group);
		if (ret) {
			kobject_put(fsvq->kobj);
			goto out_del;
		}
	}

	return 0;

out_del:
	for (j = 0; j < i; j++) {
		fsvq = &fs->vqs[j];
		kobject_put(fsvq->kobj);
	}
	return ret;
}

/* Add a new instance to the list or return -EEXIST if the tag name exists */
static int virtio_fs_add_instance(struct virtio_device *vdev,
				  struct virtio_fs *fs)
{
	struct virtio_fs *fs2;
	int ret;

	mutex_lock(&virtio_fs_mutex);

	list_for_each_entry(fs2, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, fs2->tag) == 0) {
			mutex_unlock(&virtio_fs_mutex);
			return -EEXIST;
		}
	}

	/* Use the virtio_device's index as a unique identifier, there is no
	 * need to allocate our own identifiers because the virtio_fs instance
	 * is only visible to userspace as long as the underlying virtio_device
	 * exists.
	 */
	fs->kobj.kset = virtio_fs_kset;
	ret = kobject_add(&fs->kobj, NULL, "%d", vdev->index);
	if (ret < 0)
		goto out_unlock;

	fs->mqs_kobj = kobject_create_and_add("mqs", &fs->kobj);
	if (!fs->mqs_kobj) {
		ret = -ENOMEM;
		goto out_del;
	}

	ret = sysfs_create_link(&fs->kobj, &vdev->dev.kobj, "device");
	if (ret < 0)
		goto out_put;

	ret = virtio_fs_add_queues_sysfs(fs);
	if (ret)
		goto out_remove;

	list_add_tail(&fs->list, &virtio_fs_instances);

	mutex_unlock(&virtio_fs_mutex);

	kobject_uevent(&fs->kobj, KOBJ_ADD);

	return 0;

out_remove:
	sysfs_remove_link(&fs->kobj, "device");
out_put:
	kobject_put(fs->mqs_kobj);
out_del:
	kobject_del(&fs->kobj);
out_unlock:
	mutex_unlock(&virtio_fs_mutex);
	return ret;
}

/* Return the virtio_fs with a given tag, or NULL */
static struct virtio_fs *virtio_fs_find_instance(const char *tag)
{
	struct virtio_fs *fs;

	mutex_lock(&virtio_fs_mutex);

	list_for_each_entry(fs, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, tag) == 0) {
			kobject_get(&fs->kobj);
			goto found;
		}
	}

	fs = NULL; /* not found */

found:
	mutex_unlock(&virtio_fs_mutex);

	return fs;
}

static void virtio_fs_free_devs(struct virtio_fs *fs)
{
	unsigned int i;

	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		if (!fsvq->fud)
			continue;

		fuse_dev_free(fsvq->fud);
		fsvq->fud = NULL;
	}
}

/* Read the filesystem name from the virtio config into fs->tag. The tag is
 * devm-allocated and freed along with the device.
 */
static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
{
	char tag_buf[sizeof_field(struct virtio_fs_config, tag)];
	char *end;
	size_t len;

	virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag),
			   &tag_buf, sizeof(tag_buf));
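	/* The config space tag is not NUL-terminated when it fills the field */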
	end = memchr(tag_buf, '\0', sizeof(tag_buf));
	if (end == tag_buf)
		return -EINVAL; /* empty tag */
	if (!end)
		end = &tag_buf[sizeof(tag_buf)];

	len = end - tag_buf;
	fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL);
	if (!fs->tag)
		return -ENOMEM;
	memcpy(fs->tag, tag_buf, len);
	fs->tag[len] = '\0';

	/* While the VIRTIO specification allows any character, newlines are
	 * awkward on mount(8) command-lines and cause problems in the sysfs
	 * "tag" attr and uevent TAG= properties. Forbid them.
	 */
	if (strchr(fs->tag, '\n')) {
		dev_dbg(&vdev->dev, "refusing virtiofs tag with newline character\n");
		return -EINVAL;
	}

	return 0;
}

/* Work function for hiprio completion */
static void virtio_fs_hiprio_done_work(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 done_work);
	struct virtqueue *vq = fsvq->vq;

	/* Free completed FUSE_FORGET requests */
	spin_lock(&fsvq->lock);
	do {
		unsigned int len;
		void *req;

		virtqueue_disable_cb(vq);

		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
			kfree(req);
			dec_in_flight_req(fsvq);
		}
	} while (!virtqueue_enable_cb(vq));

	if (!list_empty(&fsvq->queued_reqs))
		schedule_work(&fsvq->dispatch_work);

	spin_unlock(&fsvq->lock);
}

static void virtio_fs_request_dispatch_work(struct work_struct *work)
{
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work);
	int ret;

	pr_debug("virtio-fs: worker %s called.\n", __func__);
	while (1) {
		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
					       list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			break;
		}

		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);
		fuse_request_end(req);
	}

	/* Dispatch pending requests */
	while (1) {
		unsigned int flags;

		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->queued_reqs,
					       struct fuse_req, list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			return;
		}
		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);

		flags = memalloc_nofs_save();
		ret = virtio_fs_enqueue_req(fsvq, req, true, GFP_KERNEL);
		memalloc_nofs_restore(flags);
		if (ret < 0) {
			if (ret == -ENOSPC) {
				spin_lock(&fsvq->lock);
				list_add_tail(&req->list, &fsvq->queued_reqs);
				spin_unlock(&fsvq->lock);
				return;
			}
			req->out.h.error = ret;
			spin_lock(&fsvq->lock);
			dec_in_flight_req(fsvq);
			spin_unlock(&fsvq->lock);
			pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
			       ret);
			fuse_request_end(req);
		}
	}
}

/*
 * Returns 1 if the queue is full and the sender should wait a bit before
 * sending the next request, 0 otherwise.
 */
static int send_forget_request(struct virtio_fs_vq *fsvq,
			       struct virtio_fs_forget *forget,
			       bool in_flight)
{
	struct scatterlist sg;
	struct virtqueue *vq;
	int ret = 0;
	bool notify;
	struct virtio_fs_forget_req *req = &forget->req;

	spin_lock(&fsvq->lock);
	if (!fsvq->connected) {
		if (in_flight)
			dec_in_flight_req(fsvq);
		kfree(forget);
		goto out;
	}

	sg_init_one(&sg, req, sizeof(*req));
	vq = fsvq->vq;
	dev_dbg(&vq->vdev->dev, "%s\n", __func__);

	ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
				 ret);
			list_add_tail(&forget->list, &fsvq->queued_reqs);
			if (!in_flight)
				inc_in_flight_req(fsvq);
			/* Queue is full */
			ret = 1;
		} else {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
				 ret);
			kfree(forget);
			if (in_flight)
				dec_in_flight_req(fsvq);
		}
		goto out;
	}

	if (!in_flight)
		inc_in_flight_req(fsvq);
	notify = virtqueue_kick_prepare(vq);
	spin_unlock(&fsvq->lock);

	if (notify)
		virtqueue_notify(vq);
	return ret;
out:
	spin_unlock(&fsvq->lock);
	return ret;
}

static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
{
	struct virtio_fs_forget *forget;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work);

	pr_debug("virtio-fs: worker %s called.\n", __func__);
	while (1) {
		spin_lock(&fsvq->lock);
		forget = list_first_entry_or_null(&fsvq->queued_reqs,
					struct virtio_fs_forget, list);
		if (!forget) {
			spin_unlock(&fsvq->lock);
			return;
		}

		list_del(&forget->list);
		spin_unlock(&fsvq->lock);
		if (send_forget_request(fsvq, forget, true))
			return;
	}
}

/* Allocate and copy args into req->argbuf */
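/* The buffer is sized for both in and out args; only the in args are copied
 * here, the out args are copied back by copy_args_from_argbuf() on completion.
 */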
static int copy_args_to_argbuf(struct fuse_req *req, gfp_t gfp)
{
	struct fuse_args *args = req->args;
	unsigned int offset = 0;
	unsigned int num_in;
	unsigned int num_out;
	unsigned int len;
	unsigned int i;

	num_in = args->in_numargs - args->in_pages;
	num_out = args->out_numargs - args->out_pages;
	len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) +
	      fuse_len_args(num_out, args->out_args);

	req->argbuf = kmalloc(len, gfp);
	if (!req->argbuf)
		return -ENOMEM;

	for (i = 0; i < num_in; i++) {
		memcpy(req->argbuf + offset,
		       args->in_args[i].value,
		       args->in_args[i].size);
		offset += args->in_args[i].size;
	}

	return 0;
}

/* Copy args out of and free req->argbuf */
static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
{
	unsigned int remaining;
	unsigned int offset;
	unsigned int num_in;
	unsigned int num_out;
	unsigned int i;

	remaining = req->out.h.len - sizeof(req->out.h);
	num_in = args->in_numargs - args->in_pages;
	num_out = args->out_numargs - args->out_pages;
	offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args);

	for (i = 0; i < num_out; i++) {
		unsigned int argsize = args->out_args[i].size;

		if (args->out_argvar &&
		    i == args->out_numargs - 1 &&
		    argsize > remaining) {
			argsize = remaining;
		}

		memcpy(args->out_args[i].value, req->argbuf + offset, argsize);
		offset += argsize;

		if (i != args->out_numargs - 1)
			remaining -= argsize;
	}

	/* Store the actual size of the variable-length arg */
	if (args->out_argvar)
		args->out_args[args->out_numargs - 1].size = remaining;

	kfree(req->argbuf);
	req->argbuf = NULL;
}

/* Work function for request completion */
static void virtio_fs_request_complete(struct fuse_req *req,
				       struct virtio_fs_vq *fsvq)
{
	struct fuse_pqueue *fpq = &fsvq->fud->pq;
	struct fuse_args *args;
	struct fuse_args_pages *ap;
	unsigned int len, i, thislen;
	struct folio *folio;

	/*
	 * TODO verify that server properly follows FUSE protocol
	 * (oh.uniq, oh.len)
	 */
	args = req->args;
	copy_args_from_argbuf(args, req);

	if (args->out_pages && args->page_zeroing) {
		len = args->out_args[args->out_numargs - 1].size;
		ap = container_of(args, typeof(*ap), args);
		for (i = 0; i < ap->num_folios; i++) {
			thislen = ap->descs[i].length;
			if (len < thislen) {
				WARN_ON(ap->descs[i].offset);
				folio = ap->folios[i];
				folio_zero_segment(folio, len, thislen);
				len = 0;
			} else {
				len -= thislen;
			}
		}
	}

	spin_lock(&fpq->lock);
	clear_bit(FR_SENT, &req->flags);
	spin_unlock(&fpq->lock);

	fuse_request_end(req);
	spin_lock(&fsvq->lock);
	dec_in_flight_req(fsvq);
	spin_unlock(&fsvq->lock);
}

static void virtio_fs_complete_req_work(struct work_struct *work)
{
	struct virtio_fs_req_work *w =
		container_of(work, typeof(*w), done_work);

	virtio_fs_request_complete(w->req, w->fsvq);
	kfree(w);
}

static void virtio_fs_requests_done_work(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 done_work);
	struct fuse_pqueue *fpq = &fsvq->fud->pq;
	struct virtqueue *vq = fsvq->vq;
	struct fuse_req *req;
	struct fuse_req *next;
	unsigned int len;
	LIST_HEAD(reqs);

	/* Collect completed requests off the virtqueue */
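	/* Re-check after re-enabling the callback so a completion that
	 * raced with the drain is not missed.
	 */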
	spin_lock(&fsvq->lock);
	do {
		virtqueue_disable_cb(vq);

		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
			spin_lock(&fpq->lock);
			list_move_tail(&req->list, &reqs);
			spin_unlock(&fpq->lock);
		}
	} while (!virtqueue_enable_cb(vq));
	spin_unlock(&fsvq->lock);

	/* End requests */
	list_for_each_entry_safe(req, next, &reqs, list) {
		list_del_init(&req->list);

		/* blocking async request completes in a worker context */
		if (req->args->may_block) {
			struct virtio_fs_req_work *w;

			w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
			INIT_WORK(&w->done_work, virtio_fs_complete_req_work);
			w->fsvq = fsvq;
			w->req = req;
			schedule_work(&w->done_work);
		} else {
			virtio_fs_request_complete(req, fsvq);
		}
	}

	/* Try to push previously queued requests, as the queue might no longer be full */
	spin_lock(&fsvq->lock);
	if (!list_empty(&fsvq->queued_reqs))
		schedule_work(&fsvq->dispatch_work);
	spin_unlock(&fsvq->lock);
}

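/* Build the cpu -> request queue map consumed by virtio_fs_send_req() */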
static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs)
{
	const struct cpumask *mask, *masks;
	unsigned int q, cpu;

	/* First attempt to map using existing transport layer affinities,
	 * e.g. PCIe MSI-X.
	 */
	if (!vdev->config->get_vq_affinity)
		goto fallback;

	for (q = 0; q < fs->num_request_queues; q++) {
		mask = vdev->config->get_vq_affinity(vdev, VQ_REQUEST + q);
		if (!mask)
			goto fallback;

		for_each_cpu(cpu, mask)
			fs->mq_map[cpu] = q;
	}

	return;
fallback:
	/* Attempt to map evenly in groups over the CPUs */
	masks = group_cpus_evenly(fs->num_request_queues);
	/* If even this fails, fall back to mapping all CPUs to queue zero */
	if (!masks) {
		for_each_possible_cpu(cpu)
			fs->mq_map[cpu] = 0;
		return;
	}

	for (q = 0; q < fs->num_request_queues; q++) {
		for_each_cpu(cpu, &masks[q])
			fs->mq_map[cpu] = q;
	}
	kfree(masks);
}

/* Virtqueue interrupt handler */
static void virtio_fs_vq_done(struct virtqueue *vq)
{
	struct virtio_fs_vq *fsvq = vq_to_fsvq(vq);

	dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name);

	schedule_work(&fsvq->done_work);
}

static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
			      int vq_type)
{
	strscpy(fsvq->name, name, VQ_NAME_LEN);
	spin_lock_init(&fsvq->lock);
	INIT_LIST_HEAD(&fsvq->queued_reqs);
	INIT_LIST_HEAD(&fsvq->end_reqs);
	init_completion(&fsvq->in_flight_zero);

	if (vq_type == VQ_REQUEST) {
		INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work);
		INIT_WORK(&fsvq->dispatch_work,
				virtio_fs_request_dispatch_work);
	} else {
		INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work);
		INIT_WORK(&fsvq->dispatch_work,
				virtio_fs_hiprio_dispatch_work);
	}
}

/* Initialize virtqueues */
static int virtio_fs_setup_vqs(struct virtio_device *vdev,
			       struct virtio_fs *fs)
{
	struct virtqueue_info *vqs_info;
	struct virtqueue **vqs;
	/* Specify pre_vectors to ensure that the queues before the
	 * request queues (e.g. hiprio) don't claim any of the CPUs in
	 * the multi-queue mapping and interrupt affinities.
	 */
	struct irq_affinity desc = { .pre_vectors = VQ_REQUEST };
	unsigned int i;
	int ret = 0;

	virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
			&fs->num_request_queues);
	if (fs->num_request_queues == 0)
		return -EINVAL;

	/* Truncate the number of request queues to nr_cpu_ids */
	fs->num_request_queues = min_t(unsigned int, fs->num_request_queues,
					nr_cpu_ids);
	fs->nvqs = VQ_REQUEST + fs->num_request_queues;
	fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
	if (!fs->vqs)
		return -ENOMEM;

	vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
	fs->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*fs->mq_map), GFP_KERNEL,
					dev_to_node(&vdev->dev));
	vqs_info = kcalloc(fs->nvqs, sizeof(*vqs_info), GFP_KERNEL);
	if (!vqs || !vqs_info || !fs->mq_map) {
		ret = -ENOMEM;
		goto out;
	}

	/* Initialize the hiprio/forget request virtqueue */
	vqs_info[VQ_HIPRIO].callback = virtio_fs_vq_done;
	virtio_fs_init_vq(&fs->vqs[VQ_HIPRIO], "hiprio", VQ_HIPRIO);
	vqs_info[VQ_HIPRIO].name = fs->vqs[VQ_HIPRIO].name;

	/* Initialize the requests virtqueues */
	for (i = VQ_REQUEST; i < fs->nvqs; i++) {
		char vq_name[VQ_NAME_LEN];

		snprintf(vq_name, VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST);
		virtio_fs_init_vq(&fs->vqs[i], vq_name, VQ_REQUEST);
		vqs_info[i].callback = virtio_fs_vq_done;
		vqs_info[i].name = fs->vqs[i].name;
	}

	ret = virtio_find_vqs(vdev, fs->nvqs, vqs, vqs_info, &desc);
	if (ret < 0)
		goto out;

	for (i = 0; i < fs->nvqs; i++)
		fs->vqs[i].vq = vqs[i];

	virtio_fs_start_all_queues(fs);
out:
	kfree(vqs_info);
	kfree(vqs);
	if (ret) {
		kfree(fs->vqs);
		kfree(fs->mq_map);
	}
	return ret;
}

/* Free virtqueues (device must already be reset) */
static void virtio_fs_cleanup_vqs(struct virtio_device *vdev)
{
	vdev->config->del_vqs(vdev);
}

/* Map a window offset to a page frame number.  The window offset will have
 * been produced by .iomap_begin(), which maps a file offset to a window
 * offset.
 */
static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				    long nr_pages, enum dax_access_mode mode,
				    void **kaddr, pfn_t *pfn)
{
	struct virtio_fs *fs = dax_get_private(dax_dev);
	phys_addr_t offset = PFN_PHYS(pgoff);
	size_t max_nr_pages = fs->window_len / PAGE_SIZE - pgoff;

	if (kaddr)
		*kaddr = fs->window_kaddr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
					PFN_DEV | PFN_MAP);
	return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}

static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
				     pgoff_t pgoff, size_t nr_pages)
{
	long rc;
	void *kaddr;

	rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS, &kaddr,
			       NULL);
	if (rc < 0)
		return dax_mem2blk_err(rc);

	memset(kaddr, 0, nr_pages << PAGE_SHIFT);
	dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
	return 0;
}

static const struct dax_operations virtio_fs_dax_ops = {
	.direct_access = virtio_fs_direct_access,
	.zero_page_range = virtio_fs_zero_page_range,
};

static void virtio_fs_cleanup_dax(void *data)
{
	struct dax_device *dax_dev = data;

	kill_dax(dax_dev);
	put_dax(dax_dev);
}

DEFINE_FREE(cleanup_dax, struct dax_device *, if (!IS_ERR_OR_NULL(_T)) virtio_fs_cleanup_dax(_T))

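/* Set up the DAX window from the device's cache shared memory region, if any */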
static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
{
	struct dax_device *dax_dev __free(cleanup_dax) = NULL;
	struct virtio_shm_region cache_reg;
	struct dev_pagemap *pgmap;
	bool have_cache;

	if (!IS_ENABLED(CONFIG_FUSE_DAX))
		return 0;

	dax_dev = alloc_dax(fs, &virtio_fs_dax_ops);
	if (IS_ERR(dax_dev)) {
		int rc = PTR_ERR(dax_dev);

		return rc == -EOPNOTSUPP ? 0 : rc;
	}

	/* Get cache region */
	have_cache = virtio_get_shm_region(vdev, &cache_reg,
					   (u8)VIRTIO_FS_SHMCAP_ID_CACHE);
	if (!have_cache) {
		dev_notice(&vdev->dev, "%s: No cache capability\n", __func__);
		return 0;
	}

	if (!devm_request_mem_region(&vdev->dev, cache_reg.addr, cache_reg.len,
				     dev_name(&vdev->dev))) {
		dev_warn(&vdev->dev, "could not reserve region addr=0x%llx len=0x%llx\n",
			 cache_reg.addr, cache_reg.len);
		return -EBUSY;
	}

	dev_notice(&vdev->dev, "Cache len: 0x%llx @ 0x%llx\n", cache_reg.len,
		   cache_reg.addr);

	pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_FS_DAX;

	/* Ideally we would directly use the PCI BAR resource but
	 * devm_memremap_pages() wants its own copy in pgmap.  So
	 * initialize a struct resource from scratch (only the start
	 * and end fields will be used).
	 */
	pgmap->range = (struct range) {
		.start = (phys_addr_t) cache_reg.addr,
		.end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1,
	};
	pgmap->nr_range = 1;

	fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);
	if (IS_ERR(fs->window_kaddr))
		return PTR_ERR(fs->window_kaddr);

	fs->window_phys_addr = (phys_addr_t) cache_reg.addr;
	fs->window_len = (phys_addr_t) cache_reg.len;

	dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n",
		__func__, fs->window_kaddr, cache_reg.addr, cache_reg.len);

	fs->dax_dev = no_free_ptr(dax_dev);
	return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax,
					fs->dax_dev);
}

static int virtio_fs_probe(struct virtio_device *vdev)
{
	struct virtio_fs *fs;
	int ret;

	fs = kzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		return -ENOMEM;
	kobject_init(&fs->kobj, &virtio_fs_ktype);
	vdev->priv = fs;

	ret = virtio_fs_read_tag(vdev, fs);
	if (ret < 0)
		goto out;

	ret = virtio_fs_setup_vqs(vdev, fs);
	if (ret < 0)
		goto out;

	virtio_fs_map_queues(vdev, fs);

	ret = virtio_fs_setup_dax(vdev, fs);
	if (ret < 0)
		goto out_vqs;

	/* Bring the device online in case the filesystem is mounted and
	 * requests need to be sent before we return.
	 */
	virtio_device_ready(vdev);

	ret = virtio_fs_add_instance(vdev, fs);
	if (ret < 0)
		goto out_vqs;

	return 0;

out_vqs:
	virtio_reset_device(vdev);
	virtio_fs_cleanup_vqs(vdev);

out:
	vdev->priv = NULL;
	kobject_put(&fs->kobj);
	return ret;
}

static void virtio_fs_stop_all_queues(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		spin_lock(&fsvq->lock);
		fsvq->connected = false;
		spin_unlock(&fsvq->lock);
	}
}

static void virtio_fs_remove(struct virtio_device *vdev)
{
	struct virtio_fs *fs = vdev->priv;

	mutex_lock(&virtio_fs_mutex);
	/* This device is going away. No one should take a new reference */
	list_del_init(&fs->list);
	virtio_fs_delete_queues_sysfs(fs);
	sysfs_remove_link(&fs->kobj, "device");
	kobject_put(fs->mqs_kobj);
	kobject_del(&fs->kobj);
	virtio_fs_stop_all_queues(fs);
	virtio_fs_drain_all_queues_locked(fs);
	virtio_reset_device(vdev);
	virtio_fs_cleanup_vqs(vdev);

	vdev->priv = NULL;
	/* Put the device reference on the virtio_fs object */
	virtio_fs_put_locked(fs);
	mutex_unlock(&virtio_fs_mutex);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_fs_freeze(struct virtio_device *vdev)
{
	/* TODO need to save state here */
	pr_warn("virtio-fs: suspend/resume not yet supported\n");
	return -EOPNOTSUPP;
}

static int virtio_fs_restore(struct virtio_device *vdev)
{
	/* TODO need to restore state here */
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_FS, VIRTIO_DEV_ANY_ID },
	{},
};

static const unsigned int feature_table[] = {};

static struct virtio_driver virtio_fs_driver = {
	.driver.name		= KBUILD_MODNAME,
	.id_table		= id_table,
	.feature_table		= feature_table,
	.feature_table_size	= ARRAY_SIZE(feature_table),
	.probe			= virtio_fs_probe,
	.remove			= virtio_fs_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze			= virtio_fs_freeze,
	.restore		= virtio_fs_restore,
#endif
};

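/* FUSE_FORGET has no reply, so it is sent on the hiprio queue and its buffer
 * is freed by virtio_fs_hiprio_done_work() on completion.
 */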
static void virtio_fs_send_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *link)
{
	struct virtio_fs_forget *forget;
	struct virtio_fs_forget_req *req;
	struct virtio_fs *fs = fiq->priv;
	struct virtio_fs_vq *fsvq = &fs->vqs[VQ_HIPRIO];
	u64 unique = fuse_get_unique(fiq);

	/* Allocate a buffer for the request */
	forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
	req = &forget->req;

	req->ih = (struct fuse_in_header){
		.opcode = FUSE_FORGET,
		.nodeid = link->forget_one.nodeid,
		.unique = unique,
		.len = sizeof(*req),
	};
	req->arg = (struct fuse_forget_in){
		.nlookup = link->forget_one.nlookup,
	};

	send_forget_request(fsvq, forget, false);
	kfree(link);
}

static void virtio_fs_send_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	/*
	 * TODO interrupts.
	 *
	 * Normal fs operations on local filesystems aren't interruptible.
	 * Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
	 * with a lock shared between host and guest.
	 */
}

/* Count number of scatter-gather elements required */
static unsigned int sg_count_fuse_folios(struct fuse_folio_desc *folio_descs,
					 unsigned int num_folios,
					 unsigned int total_len)
{
	unsigned int i;
	unsigned int this_len;

	for (i = 0; i < num_folios && total_len; i++) {
		this_len = min(folio_descs[i].length, total_len);
		total_len -= this_len;
	}

	return i;
}

/* Return the number of scatter-gather list elements required */
static unsigned int sg_count_fuse_req(struct fuse_req *req)
{
	struct fuse_args *args = req->args;
	struct fuse_args_pages *ap = container_of(args, typeof(*ap), args);
	unsigned int size, total_sgs = 1 /* fuse_in_header */;

	if (args->in_numargs - args->in_pages)
		total_sgs += 1;

	if (args->in_pages) {
		size = args->in_args[args->in_numargs - 1].size;
		total_sgs += sg_count_fuse_folios(ap->descs, ap->num_folios,
						  size);
	}

	if (!test_bit(FR_ISREPLY, &req->flags))
		return total_sgs;

	total_sgs += 1 /* fuse_out_header */;

	if (args->out_numargs - args->out_pages)
		total_sgs += 1;

	if (args->out_pages) {
		size = args->out_args[args->out_numargs - 1].size;
		total_sgs += sg_count_fuse_folios(ap->descs, ap->num_folios,
						  size);
	}

	return total_sgs;
}

/* Add folios to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_folios(struct scatterlist *sg,
					struct folio **folios,
					struct fuse_folio_desc *folio_descs,
					unsigned int num_folios,
					unsigned int total_len)
{
	unsigned int i;
	unsigned int this_len;

	for (i = 0; i < num_folios && total_len; i++) {
		sg_init_table(&sg[i], 1);
		this_len = min(folio_descs[i].length, total_len);
		sg_set_folio(&sg[i], folios[i], this_len, folio_descs[i].offset);
		total_len -= this_len;
	}

	return i;
}

/* Add args to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_args(struct scatterlist *sg,
				      struct fuse_req *req,
				      struct fuse_arg *args,
				      unsigned int numargs,
				      bool argpages,
				      void *argbuf,
				      unsigned int *len_used)
{
	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
	unsigned int total_sgs = 0;
	unsigned int len;

	len = fuse_len_args(numargs - argpages, args);
	if (len)
		sg_init_one(&sg[total_sgs++], argbuf, len);

	if (argpages)
		total_sgs += sg_init_fuse_folios(&sg[total_sgs],
						 ap->folios, ap->descs,
						 ap->num_folios,
						 args[numargs - 1].size);

	if (len_used)
		*len_used = len;

	return total_sgs;
}

/* Add a request to a virtqueue and kick the device */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight,
				 gfp_t gfp)
{
	/* requests need at least 4 elements */
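	/* (in header, in args, out header, out args); the two spare slots
	 * let a request carry one folio in each direction before we fall
	 * back to heap-allocated scatterlists below
	 */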
	struct scatterlist *stack_sgs[6];
	struct scatterlist stack_sg[ARRAY_SIZE(stack_sgs)];
	struct scatterlist **sgs = stack_sgs;
	struct scatterlist *sg = stack_sg;
	struct virtqueue *vq;
	struct fuse_args *args = req->args;
	unsigned int argbuf_used = 0;
	unsigned int out_sgs = 0;
	unsigned int in_sgs = 0;
	unsigned int total_sgs;
	unsigned int i;
	int ret;
	bool notify;
	struct fuse_pqueue *fpq;

	/* Does the sglist fit on the stack? */
	total_sgs = sg_count_fuse_req(req);
	if (total_sgs > ARRAY_SIZE(stack_sgs)) {
		sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), gfp);
		sg = kmalloc_array(total_sgs, sizeof(sg[0]), gfp);
		if (!sgs || !sg) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Use a bounce buffer since stack args cannot be mapped */
	ret = copy_args_to_argbuf(req, gfp);
	if (ret < 0)
		goto out;

	/* Request elements */
	sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h));
	out_sgs += sg_init_fuse_args(&sg[out_sgs], req,
				     (struct fuse_arg *)args->in_args,
				     args->in_numargs, args->in_pages,
				     req->argbuf, &argbuf_used);

	/* Reply elements */
	if (test_bit(FR_ISREPLY, &req->flags)) {
		sg_init_one(&sg[out_sgs + in_sgs++],
			    &req->out.h, sizeof(req->out.h));
		in_sgs += sg_init_fuse_args(&sg[out_sgs + in_sgs], req,
					    args->out_args, args->out_numargs,
					    args->out_pages,
					    req->argbuf + argbuf_used, NULL);
	}

	WARN_ON(out_sgs + in_sgs != total_sgs);

	for (i = 0; i < total_sgs; i++)
		sgs[i] = &sg[i];

	spin_lock(&fsvq->lock);

	if (!fsvq->connected) {
		spin_unlock(&fsvq->lock);
		ret = -ENOTCONN;
		goto out;
	}

	vq = fsvq->vq;
	ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock(&fsvq->lock);
		goto out;
	}

	/* Request successfully sent. */
	fpq = &fsvq->fud->pq;
	spin_lock(&fpq->lock);
	list_add_tail(&req->list, fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();

	if (!in_flight)
		inc_in_flight_req(fsvq);
	notify = virtqueue_kick_prepare(vq);

	spin_unlock(&fsvq->lock);

	if (notify)
		virtqueue_notify(vq);

out:
	if (ret < 0 && req->argbuf) {
		kfree(req->argbuf);
		req->argbuf = NULL;
	}
	if (sgs != stack_sgs) {
		kfree(sgs);
		kfree(sg);
	}

	return ret;
}

static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	unsigned int queue_id;
	struct virtio_fs *fs;
	struct virtio_fs_vq *fsvq;
	int ret;

	if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
		req->in.h.unique = fuse_get_unique(fiq);

	clear_bit(FR_PENDING, &req->flags);

	fs = fiq->priv;
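	/* Any request queue is functionally correct, so a stale CPU id
	 * after migration is harmless here.
	 */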
	queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()];

	pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u queue_id %u\n",
		 __func__, req->in.h.opcode, req->in.h.unique,
		 req->in.h.nodeid, req->in.h.len,
		 fuse_len_args(req->args->out_numargs, req->args->out_args),
		 queue_id);

	fsvq = &fs->vqs[queue_id];
	ret = virtio_fs_enqueue_req(fsvq, req, false, GFP_ATOMIC);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			/*
			 * Virtqueue full. Retry submission from worker
			 * context as we might be holding fc->bg_lock.
			 */
			spin_lock(&fsvq->lock);
			list_add_tail(&req->list, &fsvq->queued_reqs);
			inc_in_flight_req(fsvq);
			spin_unlock(&fsvq->lock);
			return;
		}
		req->out.h.error = ret;
		pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);

		/* Can't end request in submission context. Use a worker */
		spin_lock(&fsvq->lock);
		list_add_tail(&req->list, &fsvq->end_reqs);
		schedule_work(&fsvq->dispatch_work);
		spin_unlock(&fsvq->lock);
		return;
	}
}

static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
	.send_forget	= virtio_fs_send_forget,
	.send_interrupt	= virtio_fs_send_interrupt,
	.send_req	= virtio_fs_send_req,
	.release	= virtio_fs_fiq_release,
};

static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
{
	ctx->rootmode = S_IFDIR;
	ctx->default_permissions = 1;
	ctx->allow_other = 1;
	ctx->max_read = UINT_MAX;
	ctx->blksize = 512;
	ctx->destroy = true;
	ctx->no_control = true;
	ctx->no_force_umount = true;
}

static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct virtio_fs *fs = fc->iq.priv;
	struct fuse_fs_context *ctx = fsc->fs_private;
	unsigned int i;
	int err;

	virtio_fs_ctx_set_defaults(ctx);
	mutex_lock(&virtio_fs_mutex);

	/* After taking the mutex, make sure the virtiofs device is still
	 * there. Though we are holding a reference to it, the driver's
	 * ->remove might still have cleaned up the virtqueues. In that
	 * case bail out.
	 */
	err = -EINVAL;
	if (list_empty(&fs->list)) {
		pr_info("virtio-fs: tag <%s> not found\n", fs->tag);
		goto err;
	}

	err = -ENOMEM;
	/* Allocate a fuse_dev for each queue, including hiprio */
	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		fsvq->fud = fuse_dev_alloc();
		if (!fsvq->fud)
			goto err_free_fuse_devs;
	}

	/* virtiofs allocates and installs its own fuse devices */
	ctx->fudptr = NULL;
	if (ctx->dax_mode != FUSE_DAX_NEVER) {
		if (ctx->dax_mode == FUSE_DAX_ALWAYS && !fs->dax_dev) {
			err = -EINVAL;
			pr_err("virtio-fs: dax can't be enabled as the filesystem device does not support it.\n");
			goto err_free_fuse_devs;
		}
		ctx->dax_dev = fs->dax_dev;
	}
	err = fuse_fill_super_common(sb, ctx);
	if (err < 0)
		goto err_free_fuse_devs;

	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		fuse_dev_install(fsvq->fud, fc);
	}

	/* A previous unmount will have stopped all queues. Start these again */
	virtio_fs_start_all_queues(fs);
	fuse_send_init(fm);
	mutex_unlock(&virtio_fs_mutex);
	return 0;

err_free_fuse_devs:
	virtio_fs_free_devs(fs);
err:
	mutex_unlock(&virtio_fs_mutex);
	return err;
}

static void virtio_fs_conn_destroy(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;
	struct virtio_fs *vfs = fc->iq.priv;
	struct virtio_fs_vq *fsvq = &vfs->vqs[VQ_HIPRIO];

	/* Stop dax worker. Soon evict_inodes() will be called which
	 * will free all memory ranges belonging to all inodes.
	 */
	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_cancel_work(fc);

	/* Stop forget queue. Soon destroy will be sent */
	spin_lock(&fsvq->lock);
	fsvq->connected = false;
	spin_unlock(&fsvq->lock);
	virtio_fs_drain_all_queues(vfs);

	fuse_conn_destroy(fm);

	/* fuse_conn_destroy() must have sent destroy. Stop all queues
	 * and drain one more time and free fuse devices. Freeing fuse
	 * devices will drop their reference on fuse_conn and that in
	 * turn will drop its reference on virtio_fs object.
	 */
	virtio_fs_stop_all_queues(vfs);
	virtio_fs_drain_all_queues(vfs);
	virtio_fs_free_devs(vfs);
}

static void virtio_kill_sb(struct super_block *sb)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	bool last;

	/* If mount failed, we can still be called without any fc */
	if (sb->s_root) {
		last = fuse_mount_remove(fm);
		if (last)
			virtio_fs_conn_destroy(fm);
	}
	kill_anon_super(sb);
	fuse_mount_destroy(fm);
}

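/* Mounts that share a virtio_fs instance (same tag) share one superblock */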
static int virtio_fs_test_super(struct super_block *sb,
				struct fs_context *fsc)
{
	struct fuse_mount *fsc_fm = fsc->s_fs_info;
	struct fuse_mount *sb_fm = get_fuse_mount_super(sb);

	return fsc_fm->fc->iq.priv == sb_fm->fc->iq.priv;
}

static int virtio_fs_get_tree(struct fs_context *fsc)
{
	struct virtio_fs *fs;
	struct super_block *sb;
	struct fuse_conn *fc = NULL;
	struct fuse_mount *fm;
	unsigned int virtqueue_size;
	int err = -EIO;

	/* This gets a reference on the virtio_fs object. The pointer is
	 * installed in fc->iq.priv. When the fuse_conn goes away, it calls
	 * ->release() to drop the reference to this object.
	 */
	fs = virtio_fs_find_instance(fsc->source);
	if (!fs) {
		pr_info("virtio-fs: tag <%s> not found\n", fsc->source);
		return -EINVAL;
	}

	virtqueue_size = virtqueue_get_vring_size(fs->vqs[VQ_REQUEST].vq);
	if (WARN_ON(virtqueue_size <= FUSE_HEADER_OVERHEAD))
		goto out_err;

	err = -ENOMEM;
	fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL);
	if (!fc)
		goto out_err;

	fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
	if (!fm)
		goto out_err;

	fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
	fc->release = fuse_free_conn;
	fc->delete_stale = true;
	fc->auto_submounts = true;
	fc->sync_fs = true;
	fc->use_pages_for_kvec_io = true;

	/* Tell FUSE to split requests that exceed the virtqueue's size */
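	/* (e.g. a 128-descriptor vring limits requests to 124 pages of data) */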
	fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit,
				    virtqueue_size - FUSE_HEADER_OVERHEAD);

	fsc->s_fs_info = fm;
	sb = sget_fc(fsc, virtio_fs_test_super, set_anon_super_fc);
	if (fsc->s_fs_info)
		fuse_mount_destroy(fm);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = virtio_fs_fill_super(sb, fsc);
		if (err) {
			deactivate_locked_super(sb);
			return err;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	WARN_ON(fsc->root);
	fsc->root = dget(sb->s_root);
	return 0;

out_err:
	kfree(fc);
	virtio_fs_put(fs);
	return err;
}

static const struct fs_context_operations virtio_fs_context_ops = {
	.free		= virtio_fs_free_fsc,
	.parse_param	= virtio_fs_parse_param,
	.get_tree	= virtio_fs_get_tree,
};

static int virtio_fs_init_fs_context(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx;

	if (fsc->purpose == FS_CONTEXT_FOR_SUBMOUNT)
		return fuse_init_fs_context_submount(fsc);

	ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	fsc->fs_private = ctx;
	fsc->ops = &virtio_fs_context_ops;
	return 0;
}

static struct file_system_type virtio_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "virtiofs",
	.init_fs_context = virtio_fs_init_fs_context,
	.kill_sb	= virtio_kill_sb,
	.fs_flags	= FS_ALLOW_IDMAP,
};

static int virtio_fs_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
	const struct virtio_fs *fs = container_of(kobj, struct virtio_fs, kobj);

	add_uevent_var(env, "TAG=%s", fs->tag);
	return 0;
}

static const struct kset_uevent_ops virtio_fs_uevent_ops = {
	.uevent = virtio_fs_uevent,
};

static int __init virtio_fs_sysfs_init(void)
{
	virtio_fs_kset = kset_create_and_add("virtiofs", &virtio_fs_uevent_ops,
					     fs_kobj);
	if (!virtio_fs_kset)
		return -ENOMEM;
	return 0;
}

static void virtio_fs_sysfs_exit(void)
{
	kset_unregister(virtio_fs_kset);
	virtio_fs_kset = NULL;
}

static int __init virtio_fs_init(void)
{
	int ret;

	ret = virtio_fs_sysfs_init();
	if (ret < 0)
		return ret;

	ret = register_virtio_driver(&virtio_fs_driver);
	if (ret < 0)
		goto sysfs_exit;

	ret = register_filesystem(&virtio_fs_type);
	if (ret < 0)
		goto unregister_virtio_driver;

	return 0;

unregister_virtio_driver:
	unregister_virtio_driver(&virtio_fs_driver);
sysfs_exit:
	virtio_fs_sysfs_exit();
	return ret;
}
module_init(virtio_fs_init);

static void __exit virtio_fs_exit(void)
{
	unregister_filesystem(&virtio_fs_type);
	unregister_virtio_driver(&virtio_fs_driver);
	virtio_fs_sysfs_exit();
}
module_exit(virtio_fs_exit);

MODULE_AUTHOR("Stefan Hajnoczi <stefanha@redhat.com>");
MODULE_DESCRIPTION("Virtio Filesystem");
MODULE_LICENSE("GPL");
MODULE_ALIAS_FS(KBUILD_MODNAME);
MODULE_DEVICE_TABLE(virtio, id_table);