// SPDX-License-Identifier: GPL-2.0
/*
 * virtio-fs: Virtio Filesystem
 * Copyright (C) 2018 Red Hat, Inc.
 */

#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/group_cpus.h>
#include <linux/memremap.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_fs.h>
#include <linux/delay.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/highmem.h>
#include <linux/cleanup.h>
#include <linux/uio.h>
#include "fuse_i.h"
#include "fuse_dev_i.h"

/* Used to help calculate the FUSE connection's max_pages limit for a request's
 * size. Parts of the struct fuse_req are sliced into scatter-gather lists in
 * addition to the pages used, so this can help account for that overhead.
 */
#define FUSE_HEADER_OVERHEAD 4

/* List of virtio-fs device instances and a lock for the list. Also provides
 * mutual exclusion between the device removal and mounting paths.
 */
static DEFINE_MUTEX(virtio_fs_mutex);
static LIST_HEAD(virtio_fs_instances);

/* The /sys/fs/virtio_fs/ kset */
static struct kset *virtio_fs_kset;

enum {
	VQ_HIPRIO,
	VQ_REQUEST
};

#define VQ_NAME_LEN 24

/* Per-virtqueue state */
struct virtio_fs_vq {
	spinlock_t lock;
	struct virtqueue *vq; /* protected by ->lock */
	struct work_struct done_work;
	struct list_head queued_reqs;
	struct list_head end_reqs; /* End these requests */
	struct work_struct dispatch_work;
	struct fuse_dev *fud;
	bool connected;
	long in_flight;
	struct completion in_flight_zero; /* No inflight requests */
	struct kobject *kobj;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

/* A virtio-fs device instance */
struct virtio_fs {
	struct kobject kobj;
	struct kobject *mqs_kobj;
	struct list_head list; /* on virtio_fs_instances */
	char *tag;
	struct virtio_fs_vq *vqs;
	unsigned int nvqs;               /* number of virtqueues */
	unsigned int num_request_queues; /* number of request queues */
	struct dax_device *dax_dev;

	unsigned int *mq_map; /* index = cpu id, value = request vq id */

	/* DAX memory window where file contents are mapped */
	void *window_kaddr;
	phys_addr_t window_phys_addr;
	size_t window_len;
};

struct virtio_fs_forget_req {
	struct fuse_in_header ih;
	struct fuse_forget_in arg;
};

struct virtio_fs_forget {
	/* This request can be temporarily queued on the virtqueue */
	struct list_head list;
	struct virtio_fs_forget_req req;
};

struct virtio_fs_req_work {
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq;
	struct work_struct done_work;
};

static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight,
				 gfp_t gfp);

static const struct constant_table dax_param_enums[] = {
	{"always", FUSE_DAX_ALWAYS },
	{"never", FUSE_DAX_NEVER },
	{"inode", FUSE_DAX_INODE_USER },
	{}
};

enum {
	OPT_DAX,
	OPT_DAX_ENUM,
};

static const struct fs_parameter_spec virtio_fs_parameters[] = {
	fsparam_flag("dax", OPT_DAX),
	fsparam_enum("dax", OPT_DAX_ENUM, dax_param_enums),
	{}
};

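/* Parse a single virtiofs mount parameter: "dax" or "dax=<always|never|inode>" */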
static int virtio_fs_parse_param(struct fs_context *fsc,
				 struct fs_parameter *param)
{
	struct fs_parse_result result;
	struct fuse_fs_context *ctx = fsc->fs_private;
	int opt;

	opt = fs_parse(fsc, virtio_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case OPT_DAX:
		ctx->dax_mode = FUSE_DAX_ALWAYS;
		break;
	case OPT_DAX_ENUM:
		ctx->dax_mode = result.uint_32;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void virtio_fs_free_fsc(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx = fsc->fs_private;

	kfree(ctx);
}

static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
{
	struct virtio_fs *fs = vq->vdev->priv;

	return &fs->vqs[vq->index];
}

/* Should be called with fsvq->lock held. */
static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
{
	fsvq->in_flight++;
}

/* Should be called with fsvq->lock held. */
static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight <= 0);
	fsvq->in_flight--;
	if (!fsvq->in_flight)
		complete(&fsvq->in_flight_zero);
}

static ssize_t tag_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
{
	struct virtio_fs *fs = container_of(kobj, struct virtio_fs, kobj);

	return sysfs_emit(buf, "%s\n", fs->tag);
}

static struct kobj_attribute virtio_fs_tag_attr = __ATTR_RO(tag);

static struct attribute *virtio_fs_attrs[] = {
	&virtio_fs_tag_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(virtio_fs);

static void virtio_fs_ktype_release(struct kobject *kobj)
{
	struct virtio_fs *vfs = container_of(kobj, struct virtio_fs, kobj);

	kfree(vfs->mq_map);
	kfree(vfs->vqs);
	kfree(vfs);
}

static const struct kobj_type virtio_fs_ktype = {
	.release = virtio_fs_ktype_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = virtio_fs_groups,
};

static struct virtio_fs_vq *virtio_fs_kobj_to_vq(struct virtio_fs *fs,
						 struct kobject *kobj)
{
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		if (kobj == fs->vqs[i].kobj)
			return &fs->vqs[i];
	}
	return NULL;
}

static ssize_t name_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj);
	struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj);

	if (!fsvq)
		return -EINVAL;
	return sysfs_emit(buf, "%s\n", fsvq->name);
}

static struct kobj_attribute virtio_fs_vq_name_attr = __ATTR_RO(name);

static ssize_t cpu_list_show(struct kobject *kobj,
			     struct kobj_attribute *attr, char *buf)
{
	struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj);
	struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj);
	unsigned int cpu, qid;
	const size_t size = PAGE_SIZE - 1;
	bool first = true;
	int ret = 0, pos = 0;

	if (!fsvq)
		return -EINVAL;

	qid = fsvq->vq->index;
	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid)) {
			if (first)
				ret = snprintf(buf + pos, size - pos, "%u", cpu);
			else
				ret = snprintf(buf + pos, size - pos, ", %u", cpu);

			if (ret >= size - pos)
				break;
			first = false;
			pos += ret;
		}
	}
	ret = snprintf(buf + pos, size + 1 - pos, "\n");
	return pos + ret;
}

static struct kobj_attribute virtio_fs_vq_cpu_list_attr = __ATTR_RO(cpu_list);

static struct attribute *virtio_fs_vq_attrs[] = {
	&virtio_fs_vq_name_attr.attr,
	&virtio_fs_vq_cpu_list_attr.attr,
	NULL
};

static struct attribute_group virtio_fs_vq_attr_group = {
	.attrs = virtio_fs_vq_attrs,
};

/* Must be called with virtio_fs_mutex held */
static void virtio_fs_put_locked(struct virtio_fs *fs)
{
	lockdep_assert_held(&virtio_fs_mutex);

	kobject_put(&fs->kobj);
}

static void virtio_fs_put(struct virtio_fs *fs)
{
	mutex_lock(&virtio_fs_mutex);
	virtio_fs_put_locked(fs);
	mutex_unlock(&virtio_fs_mutex);
}

static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
{
	struct virtio_fs *vfs = fiq->priv;

	virtio_fs_put(vfs);
}

static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight < 0);

	/* Wait for in flight requests to finish. */
	spin_lock(&fsvq->lock);
	if (fsvq->in_flight) {
		/* We are holding virtio_fs_mutex. There should not be any
		 * waiters waiting for completion.
		 */
		reinit_completion(&fsvq->in_flight_zero);
		spin_unlock(&fsvq->lock);
		wait_for_completion(&fsvq->in_flight_zero);
	} else {
		spin_unlock(&fsvq->lock);
	}

	flush_work(&fsvq->done_work);
	flush_work(&fsvq->dispatch_work);
}

static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		virtio_fs_drain_queue(fsvq);
	}
}

static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
{
	/* Provides mutual exclusion between the ->remove and ->kill_sb
	 * paths. We don't want both of them draining queues at the
	 * same time. The current completion logic reinits the completion,
	 * which means no other thread should already be doing a reinit
	 * or waiting for the completion.
	 */
	mutex_lock(&virtio_fs_mutex);
	virtio_fs_drain_all_queues_locked(fs);
	mutex_unlock(&virtio_fs_mutex);
}

static void virtio_fs_start_all_queues(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		spin_lock(&fsvq->lock);
		fsvq->connected = true;
		spin_unlock(&fsvq->lock);
	}
}

static void virtio_fs_delete_queues_sysfs(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		kobject_put(fsvq->kobj);
	}
}

static int virtio_fs_add_queues_sysfs(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	char buff[12];
	int i, j, ret;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];

		sprintf(buff, "%d", i);
		fsvq->kobj = kobject_create_and_add(buff, fs->mqs_kobj);
		/* Check the freshly created kobject, not fs->mqs_kobj */
		if (!fsvq->kobj) {
			ret = -ENOMEM;
			goto out_del;
		}

		ret = sysfs_create_group(fsvq->kobj, &virtio_fs_vq_attr_group);
		if (ret) {
			kobject_put(fsvq->kobj);
			goto out_del;
		}
	}

	return 0;

out_del:
	for (j = 0; j < i; j++) {
		fsvq = &fs->vqs[j];
		kobject_put(fsvq->kobj);
	}
	return ret;
}

/* Add a new instance to the list or return -EEXIST if the tag name exists */
static int virtio_fs_add_instance(struct virtio_device *vdev,
				  struct virtio_fs *fs)
{
	struct virtio_fs *fs2;
	int ret;

	mutex_lock(&virtio_fs_mutex);

	list_for_each_entry(fs2, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, fs2->tag) == 0) {
			mutex_unlock(&virtio_fs_mutex);
			return -EEXIST;
		}
	}

	/* Use the virtio_device's index as a unique identifier, there is no
	 * need to allocate our own identifiers because the virtio_fs instance
	 * is only visible to userspace as long as the underlying virtio_device
	 * exists.
	 */
	fs->kobj.kset = virtio_fs_kset;
	ret = kobject_add(&fs->kobj, NULL, "%d", vdev->index);
	if (ret < 0)
		goto out_unlock;

	fs->mqs_kobj = kobject_create_and_add("mqs", &fs->kobj);
	if (!fs->mqs_kobj) {
		ret = -ENOMEM;
		goto out_del;
	}

	ret = sysfs_create_link(&fs->kobj, &vdev->dev.kobj, "device");
	if (ret < 0)
		goto out_put;

	ret = virtio_fs_add_queues_sysfs(fs);
	if (ret)
		goto out_remove;

	list_add_tail(&fs->list, &virtio_fs_instances);

	mutex_unlock(&virtio_fs_mutex);

	kobject_uevent(&fs->kobj, KOBJ_ADD);

	return 0;

out_remove:
	sysfs_remove_link(&fs->kobj, "device");
out_put:
	kobject_put(fs->mqs_kobj);
out_del:
	kobject_del(&fs->kobj);
out_unlock:
	mutex_unlock(&virtio_fs_mutex);
	return ret;
}

/* Return the virtio_fs with a given tag, or NULL */
static struct virtio_fs *virtio_fs_find_instance(const char *tag)
{
	struct virtio_fs *fs;

	mutex_lock(&virtio_fs_mutex);

	list_for_each_entry(fs, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, tag) == 0) {
			kobject_get(&fs->kobj);
			goto found;
		}
	}

	fs = NULL; /* not found */

found:
	mutex_unlock(&virtio_fs_mutex);

	return fs;
}

static void virtio_fs_free_devs(struct virtio_fs *fs)
{
	unsigned int i;

	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		if (!fsvq->fud)
			continue;

		fuse_dev_free(fsvq->fud);
		fsvq->fud = NULL;
	}
}

/* Read the filesystem name from the virtio config into fs->tag
 * (devm-allocated, freed together with the device).
 */
static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
{
	char tag_buf[sizeof_field(struct virtio_fs_config, tag)];
	char *end;
	size_t len;

	virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag),
			   &tag_buf, sizeof(tag_buf));
	end = memchr(tag_buf, '\0', sizeof(tag_buf));
	if (end == tag_buf)
		return -EINVAL; /* empty tag */
	if (!end)
		end = &tag_buf[sizeof(tag_buf)];

	len = end - tag_buf;
	fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL);
	if (!fs->tag)
		return -ENOMEM;
	memcpy(fs->tag, tag_buf, len);
	fs->tag[len] = '\0';

	/* While the VIRTIO specification allows any character, newlines are
	 * awkward on mount(8) command-lines and cause problems in the sysfs
	 * "tag" attr and uevent TAG= properties. Forbid them.
	 */
	if (strchr(fs->tag, '\n')) {
		dev_dbg(&vdev->dev, "refusing virtiofs tag with newline character\n");
		return -EINVAL;
	}

	dev_info(&vdev->dev, "discovered new tag: %s\n", fs->tag);
	return 0;
}

/* Work function for hiprio completion */
static void virtio_fs_hiprio_done_work(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 done_work);
	struct virtqueue *vq = fsvq->vq;

	/* Free completed FUSE_FORGET requests */
	spin_lock(&fsvq->lock);
	do {
		unsigned int len;
		void *req;

		virtqueue_disable_cb(vq);

		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
			kfree(req);
			dec_in_flight_req(fsvq);
		}
	} while (!virtqueue_enable_cb(vq));

	if (!list_empty(&fsvq->queued_reqs))
		schedule_work(&fsvq->dispatch_work);

	spin_unlock(&fsvq->lock);
}

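/* Work function for a request queue's dispatch work: end any requests parked
 * on ->end_reqs, then retry queued requests that previously failed with
 * -ENOSPC.
 */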
static void virtio_fs_request_dispatch_work(struct work_struct *work)
{
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work);
	int ret;

	pr_debug("virtio-fs: worker %s called.\n", __func__);
	while (1) {
		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
					       list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			break;
		}

		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);
		fuse_request_end(req);
	}

	/* Dispatch pending requests */
	while (1) {
		unsigned int flags;

		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->queued_reqs,
					       struct fuse_req, list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			return;
		}
		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);

		flags = memalloc_nofs_save();
		ret = virtio_fs_enqueue_req(fsvq, req, true, GFP_KERNEL);
		memalloc_nofs_restore(flags);
		if (ret < 0) {
			if (ret == -ENOSPC) {
				spin_lock(&fsvq->lock);
				list_add_tail(&req->list, &fsvq->queued_reqs);
				spin_unlock(&fsvq->lock);
				return;
			}
			req->out.h.error = ret;
			spin_lock(&fsvq->lock);
			dec_in_flight_req(fsvq);
			spin_unlock(&fsvq->lock);
			pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
			       ret);
			fuse_request_end(req);
		}
	}
}

/*
 * Returns 1 if queue is full and sender should wait a bit before sending
 * next request, 0 otherwise.
 */
static int send_forget_request(struct virtio_fs_vq *fsvq,
			       struct virtio_fs_forget *forget,
			       bool in_flight)
{
	struct scatterlist sg;
	struct virtqueue *vq;
	int ret = 0;
	bool notify;
	struct virtio_fs_forget_req *req = &forget->req;

	spin_lock(&fsvq->lock);
	if (!fsvq->connected) {
		if (in_flight)
			dec_in_flight_req(fsvq);
		kfree(forget);
		goto out;
	}

	sg_init_one(&sg, req, sizeof(*req));
	vq = fsvq->vq;
	dev_dbg(&vq->vdev->dev, "%s\n", __func__);

	ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
				 ret);
			list_add_tail(&forget->list, &fsvq->queued_reqs);
			if (!in_flight)
				inc_in_flight_req(fsvq);
			/* Queue is full */
			ret = 1;
		} else {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
				 ret);
			kfree(forget);
			if (in_flight)
				dec_in_flight_req(fsvq);
		}
		goto out;
	}

	if (!in_flight)
		inc_in_flight_req(fsvq);
	notify = virtqueue_kick_prepare(vq);
	spin_unlock(&fsvq->lock);

	if (notify)
		virtqueue_notify(vq);
	return ret;
out:
	spin_unlock(&fsvq->lock);
	return ret;
}

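/* Work function for the hiprio queue's dispatch work: retry queued FORGET
 * requests until the virtqueue fills up again.
 */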
static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
{
	struct virtio_fs_forget *forget;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work);
	pr_debug("virtio-fs: worker %s called.\n", __func__);
	while (1) {
		spin_lock(&fsvq->lock);
		forget = list_first_entry_or_null(&fsvq->queued_reqs,
						  struct virtio_fs_forget, list);
		if (!forget) {
			spin_unlock(&fsvq->lock);
			return;
		}

		list_del(&forget->list);
		spin_unlock(&fsvq->lock);
		if (send_forget_request(fsvq, forget, true))
			return;
	}
}

/* Allocate and copy args into req->argbuf */
static int copy_args_to_argbuf(struct fuse_req *req, gfp_t gfp)
{
	struct fuse_args *args = req->args;
	unsigned int offset = 0;
	unsigned int num_in;
	unsigned int num_out;
	unsigned int len;
	unsigned int i;

	num_in = args->in_numargs - args->in_pages;
	num_out = args->out_numargs - args->out_pages;
	len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) +
	      fuse_len_args(num_out, args->out_args);

	req->argbuf = kmalloc(len, gfp);
	if (!req->argbuf)
		return -ENOMEM;

	for (i = 0; i < num_in; i++) {
		memcpy(req->argbuf + offset,
		       args->in_args[i].value,
		       args->in_args[i].size);
		offset += args->in_args[i].size;
	}

	return 0;
}

/* Copy args out of and free req->argbuf */
static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
{
	unsigned int remaining;
	unsigned int offset;
	unsigned int num_in;
	unsigned int num_out;
	unsigned int i;

	remaining = req->out.h.len - sizeof(req->out.h);
	num_in = args->in_numargs - args->in_pages;
	num_out = args->out_numargs - args->out_pages;
	offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args);

	for (i = 0; i < num_out; i++) {
		unsigned int argsize = args->out_args[i].size;

		if (args->out_argvar &&
		    i == args->out_numargs - 1 &&
		    argsize > remaining) {
			argsize = remaining;
		}

		memcpy(args->out_args[i].value, req->argbuf + offset, argsize);
		offset += argsize;

		if (i != args->out_numargs - 1)
			remaining -= argsize;
	}

	/* Store the actual size of the variable-length arg */
	if (args->out_argvar)
		args->out_args[args->out_numargs - 1].size = remaining;

	kfree(req->argbuf);
	req->argbuf = NULL;
}

/* Work function for request completion */
static void virtio_fs_request_complete(struct fuse_req *req,
				       struct virtio_fs_vq *fsvq)
{
	struct fuse_args *args;
	struct fuse_args_pages *ap;
	unsigned int len, i, thislen;
	struct folio *folio;

	/*
	 * TODO verify that server properly follows FUSE protocol
	 * (oh.uniq, oh.len)
	 */
	args = req->args;
	copy_args_from_argbuf(args, req);

	if (args->out_pages && args->page_zeroing) {
		len = args->out_args[args->out_numargs - 1].size;
		ap = container_of(args, typeof(*ap), args);
		for (i = 0; i < ap->num_folios; i++) {
			thislen = ap->descs[i].length;
			if (len < thislen) {
				WARN_ON(ap->descs[i].offset);
				folio = ap->folios[i];
				folio_zero_segment(folio, len, thislen);
				len = 0;
			} else {
				len -= thislen;
			}
		}
	}

	clear_bit(FR_SENT, &req->flags);

	fuse_request_end(req);
	spin_lock(&fsvq->lock);
	dec_in_flight_req(fsvq);
	spin_unlock(&fsvq->lock);
}

static void virtio_fs_complete_req_work(struct work_struct *work)
{
	struct virtio_fs_req_work *w =
		container_of(work, typeof(*w), done_work);

	virtio_fs_request_complete(w->req, w->fsvq);
	kfree(w);
}

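/* Work function for a request queue's completion work: collect completed
 * requests off the virtqueue and end them, deferring may_block requests to a
 * separate worker.
 */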
static void virtio_fs_requests_done_work(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 done_work);
	struct fuse_pqueue *fpq = &fsvq->fud->pq;
	struct virtqueue *vq = fsvq->vq;
	struct fuse_req *req;
	struct fuse_req *next;
	unsigned int len;
	LIST_HEAD(reqs);

	/* Collect completed requests off the virtqueue */
	spin_lock(&fsvq->lock);
	do {
		virtqueue_disable_cb(vq);

		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
			spin_lock(&fpq->lock);
			list_move_tail(&req->list, &reqs);
			spin_unlock(&fpq->lock);
		}
	} while (!virtqueue_enable_cb(vq));
	spin_unlock(&fsvq->lock);

	/* End requests */
	list_for_each_entry_safe(req, next, &reqs, list) {
		list_del_init(&req->list);

		/* blocking async request completes in a worker context */
		if (req->args->may_block) {
			struct virtio_fs_req_work *w;

			w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
			INIT_WORK(&w->done_work, virtio_fs_complete_req_work);
			w->fsvq = fsvq;
			w->req = req;
			schedule_work(&w->done_work);
		} else {
			virtio_fs_request_complete(req, fsvq);
		}
	}

	/* Try to push previously queued requests, as the queue might no longer be full */
	spin_lock(&fsvq->lock);
	if (!list_empty(&fsvq->queued_reqs))
		schedule_work(&fsvq->dispatch_work);
	spin_unlock(&fsvq->lock);
}

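/* Build the CPU -> request queue mapping (fs->mq_map) consulted by
 * virtio_fs_send_req() when picking a queue.
 */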
static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs)
{
	const struct cpumask *mask, *masks;
	unsigned int q, cpu, nr_masks;

	/* First attempt to map using existing transport layer affinities
	 * e.g. PCIe MSI-X
	 */
	if (!vdev->config->get_vq_affinity)
		goto fallback;

	for (q = 0; q < fs->num_request_queues; q++) {
		mask = vdev->config->get_vq_affinity(vdev, VQ_REQUEST + q);
		if (!mask)
			goto fallback;

		for_each_cpu(cpu, mask)
			fs->mq_map[cpu] = q + VQ_REQUEST;
	}

	return;
fallback:
	/* Attempt to map evenly in groups over the CPUs */
	masks = group_cpus_evenly(fs->num_request_queues, &nr_masks);
	/* If even this fails, fall back to mapping all CPUs to the first request queue */
	if (!masks) {
		for_each_possible_cpu(cpu)
			fs->mq_map[cpu] = VQ_REQUEST;
		return;
	}

	for (q = 0; q < fs->num_request_queues; q++) {
		for_each_cpu(cpu, &masks[q % nr_masks])
			fs->mq_map[cpu] = q + VQ_REQUEST;
	}
	kfree(masks);
}

/* Virtqueue interrupt handler */
static void virtio_fs_vq_done(struct virtqueue *vq)
{
	struct virtio_fs_vq *fsvq = vq_to_fsvq(vq);

	dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name);

	schedule_work(&fsvq->done_work);
}

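/* Initialize common per-virtqueue state and pick the work functions matching
 * the queue type (hiprio vs. request queue).
 */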
static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
			      int vq_type)
{
	strscpy(fsvq->name, name, VQ_NAME_LEN);
	spin_lock_init(&fsvq->lock);
	INIT_LIST_HEAD(&fsvq->queued_reqs);
	INIT_LIST_HEAD(&fsvq->end_reqs);
	init_completion(&fsvq->in_flight_zero);

	if (vq_type == VQ_REQUEST) {
		INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work);
		INIT_WORK(&fsvq->dispatch_work,
			  virtio_fs_request_dispatch_work);
	} else {
		INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work);
		INIT_WORK(&fsvq->dispatch_work,
			  virtio_fs_hiprio_dispatch_work);
	}
}

/* Initialize virtqueues */
static int virtio_fs_setup_vqs(struct virtio_device *vdev,
			       struct virtio_fs *fs)
{
	struct virtqueue_info *vqs_info;
	struct virtqueue **vqs;
	/* Specify pre_vectors to ensure that the queues before the
	 * request queues (e.g. hiprio) don't claim any of the CPUs in
	 * the multi-queue mapping and interrupt affinities
	 */
	struct irq_affinity desc = { .pre_vectors = VQ_REQUEST };
	unsigned int i;
	int ret = 0;

	virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
			&fs->num_request_queues);
	if (fs->num_request_queues == 0)
		return -EINVAL;

	/* Truncate the number of request queues to nr_cpu_ids */
	fs->num_request_queues = min_t(unsigned int, fs->num_request_queues,
				       nr_cpu_ids);
	fs->nvqs = VQ_REQUEST + fs->num_request_queues;
	fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
	if (!fs->vqs)
		return -ENOMEM;

	vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
	fs->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*fs->mq_map), GFP_KERNEL,
				  dev_to_node(&vdev->dev));
	vqs_info = kcalloc(fs->nvqs, sizeof(*vqs_info), GFP_KERNEL);
	if (!vqs || !vqs_info || !fs->mq_map) {
		ret = -ENOMEM;
		goto out;
	}

	/* Initialize the hiprio/forget request virtqueue */
	vqs_info[VQ_HIPRIO].callback = virtio_fs_vq_done;
	virtio_fs_init_vq(&fs->vqs[VQ_HIPRIO], "hiprio", VQ_HIPRIO);
	vqs_info[VQ_HIPRIO].name = fs->vqs[VQ_HIPRIO].name;

	/* Initialize the requests virtqueues */
	for (i = VQ_REQUEST; i < fs->nvqs; i++) {
		char vq_name[VQ_NAME_LEN];

		snprintf(vq_name, VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST);
		virtio_fs_init_vq(&fs->vqs[i], vq_name, VQ_REQUEST);
		vqs_info[i].callback = virtio_fs_vq_done;
		vqs_info[i].name = fs->vqs[i].name;
	}

	ret = virtio_find_vqs(vdev, fs->nvqs, vqs, vqs_info, &desc);
	if (ret < 0)
		goto out;

	for (i = 0; i < fs->nvqs; i++)
		fs->vqs[i].vq = vqs[i];

	virtio_fs_start_all_queues(fs);
out:
	kfree(vqs_info);
	kfree(vqs);
	if (ret) {
		kfree(fs->vqs);
		kfree(fs->mq_map);
		/* Clear the pointers so virtio_fs_ktype_release() doesn't
		 * free them a second time.
		 */
		fs->vqs = NULL;
		fs->mq_map = NULL;
	}
	return ret;
}

/* Free virtqueues (device must already be reset) */
static void virtio_fs_cleanup_vqs(struct virtio_device *vdev)
{
	vdev->config->del_vqs(vdev);
}

/* Map a window offset to a page frame number. The window offset will have
 * been produced by .iomap_begin(), which maps a file offset to a window
 * offset.
 */
static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				    long nr_pages, enum dax_access_mode mode,
				    void **kaddr, unsigned long *pfn)
{
	struct virtio_fs *fs = dax_get_private(dax_dev);
	phys_addr_t offset = PFN_PHYS(pgoff);
	size_t max_nr_pages = fs->window_len / PAGE_SIZE - pgoff;

	if (kaddr)
		*kaddr = fs->window_kaddr + offset;
	if (pfn)
		*pfn = PHYS_PFN(fs->window_phys_addr + offset);
	return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}

static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
				     pgoff_t pgoff, size_t nr_pages)
{
	long rc;
	void *kaddr;

	rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS, &kaddr,
			       NULL);
	if (rc < 0)
		return dax_mem2blk_err(rc);

	memset(kaddr, 0, nr_pages << PAGE_SHIFT);
	dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
	return 0;
}

static const struct dax_operations virtio_fs_dax_ops = {
	.direct_access = virtio_fs_direct_access,
	.zero_page_range = virtio_fs_zero_page_range,
};

static void virtio_fs_cleanup_dax(void *data)
{
	struct dax_device *dax_dev = data;

	kill_dax(dax_dev);
	put_dax(dax_dev);
}

DEFINE_FREE(cleanup_dax, struct dax_device *, if (!IS_ERR_OR_NULL(_T)) virtio_fs_cleanup_dax(_T))

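/* Map the device's shared memory "cache" region and register it as the DAX
 * window through which file contents are directly accessed.
 */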
static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
{
	struct dax_device *dax_dev __free(cleanup_dax) = NULL;
	struct virtio_shm_region cache_reg;
	struct dev_pagemap *pgmap;
	bool have_cache;

	if (!IS_ENABLED(CONFIG_FUSE_DAX))
		return 0;

	dax_dev = alloc_dax(fs, &virtio_fs_dax_ops);
	if (IS_ERR(dax_dev)) {
		int rc = PTR_ERR(dax_dev);

		return rc == -EOPNOTSUPP ? 0 : rc;
	}

	/* Get cache region */
	have_cache = virtio_get_shm_region(vdev, &cache_reg,
					   (u8)VIRTIO_FS_SHMCAP_ID_CACHE);
	if (!have_cache) {
		dev_notice(&vdev->dev, "%s: No cache capability\n", __func__);
		return 0;
	}

	if (!devm_request_mem_region(&vdev->dev, cache_reg.addr, cache_reg.len,
				     dev_name(&vdev->dev))) {
		dev_warn(&vdev->dev, "could not reserve region addr=0x%llx len=0x%llx\n",
			 cache_reg.addr, cache_reg.len);
		return -EBUSY;
	}

	dev_notice(&vdev->dev, "Cache len: 0x%llx @ 0x%llx\n", cache_reg.len,
		   cache_reg.addr);

	pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_FS_DAX;

	/* Ideally we would directly use the PCI BAR resource but
	 * devm_memremap_pages() wants its own copy in pgmap. So
	 * initialize a struct range from scratch (only the start
	 * and end fields will be used).
	 */
	pgmap->range = (struct range) {
		.start = (phys_addr_t) cache_reg.addr,
		.end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1,
	};
	pgmap->nr_range = 1;

	fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);
	if (IS_ERR(fs->window_kaddr))
		return PTR_ERR(fs->window_kaddr);

	fs->window_phys_addr = (phys_addr_t) cache_reg.addr;
	fs->window_len = (phys_addr_t) cache_reg.len;

	dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n",
		__func__, fs->window_kaddr, cache_reg.addr, cache_reg.len);

	fs->dax_dev = no_free_ptr(dax_dev);
	return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax,
					fs->dax_dev);
}

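/* Device probe: read the tag, set up virtqueues, the CPU->queue mapping and
 * the DAX window, then make the instance visible in sysfs.
 */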
static int virtio_fs_probe(struct virtio_device *vdev)
{
	struct virtio_fs *fs;
	int ret;

	fs = kzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		return -ENOMEM;
	kobject_init(&fs->kobj, &virtio_fs_ktype);
	vdev->priv = fs;

	ret = virtio_fs_read_tag(vdev, fs);
	if (ret < 0)
		goto out;

	ret = virtio_fs_setup_vqs(vdev, fs);
	if (ret < 0)
		goto out;

	virtio_fs_map_queues(vdev, fs);

	ret = virtio_fs_setup_dax(vdev, fs);
	if (ret < 0)
		goto out_vqs;

	/* Bring the device online in case the filesystem is mounted and
	 * requests need to be sent before we return.
	 */
	virtio_device_ready(vdev);

	ret = virtio_fs_add_instance(vdev, fs);
	if (ret < 0)
		goto out_vqs;

	return 0;

out_vqs:
	virtio_reset_device(vdev);
	virtio_fs_cleanup_vqs(vdev);

out:
	vdev->priv = NULL;
	kobject_put(&fs->kobj);
	return ret;
}

static void virtio_fs_stop_all_queues(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;

	for (i = 0; i < fs->nvqs; i++) {
		fsvq = &fs->vqs[i];
		spin_lock(&fsvq->lock);
		fsvq->connected = false;
		spin_unlock(&fsvq->lock);
	}
}

static void virtio_fs_remove(struct virtio_device *vdev)
{
	struct virtio_fs *fs = vdev->priv;

	mutex_lock(&virtio_fs_mutex);
	/* This device is going away. No one should get a new reference */
	list_del_init(&fs->list);
	virtio_fs_delete_queues_sysfs(fs);
	sysfs_remove_link(&fs->kobj, "device");
	kobject_put(fs->mqs_kobj);
	kobject_del(&fs->kobj);
	virtio_fs_stop_all_queues(fs);
	virtio_fs_drain_all_queues_locked(fs);
	virtio_reset_device(vdev);
	virtio_fs_cleanup_vqs(vdev);

	vdev->priv = NULL;
	/* Put device reference on virtio_fs object */
	virtio_fs_put_locked(fs);
	mutex_unlock(&virtio_fs_mutex);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_fs_freeze(struct virtio_device *vdev)
{
	/* TODO need to save state here */
	pr_warn("virtio-fs: suspend/resume not yet supported\n");
	return -EOPNOTSUPP;
}

static int virtio_fs_restore(struct virtio_device *vdev)
{
	/* TODO need to restore state here */
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_FS, VIRTIO_DEV_ANY_ID },
	{},
};

static const unsigned int feature_table[] = {};

static struct virtio_driver virtio_fs_driver = {
	.driver.name = KBUILD_MODNAME,
	.id_table = id_table,
	.feature_table = feature_table,
	.feature_table_size = ARRAY_SIZE(feature_table),
	.probe = virtio_fs_probe,
	.remove = virtio_fs_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtio_fs_freeze,
	.restore = virtio_fs_restore,
#endif
};

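/* fuse_iqueue_ops.send_forget: package the forget into a virtio_fs_forget
 * buffer and send it on the hiprio queue.
 */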
static void virtio_fs_send_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *link)
{
	struct virtio_fs_forget *forget;
	struct virtio_fs_forget_req *req;
	struct virtio_fs *fs = fiq->priv;
	struct virtio_fs_vq *fsvq = &fs->vqs[VQ_HIPRIO];
	u64 unique = fuse_get_unique(fiq);

	/* Allocate a buffer for the request */
	forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
	req = &forget->req;

	req->ih = (struct fuse_in_header){
		.opcode = FUSE_FORGET,
		.nodeid = link->forget_one.nodeid,
		.unique = unique,
		.len = sizeof(*req),
	};
	req->arg = (struct fuse_forget_in){
		.nlookup = link->forget_one.nlookup,
	};

	send_forget_request(fsvq, forget, false);
	kfree(link);
}

static void virtio_fs_send_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	/*
	 * TODO interrupts.
	 *
	 * Normal fs operations on local filesystems aren't interruptible.
	 * Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
	 * with a shared lock between host and guest.
	 */
}

/* Count number of scatter-gather elements required */
static unsigned int sg_count_fuse_folios(struct fuse_folio_desc *folio_descs,
					 unsigned int num_folios,
					 unsigned int total_len)
{
	unsigned int i;
	unsigned int this_len;

	for (i = 0; i < num_folios && total_len; i++) {
		this_len = min(folio_descs[i].length, total_len);
		total_len -= this_len;
	}

	return i;
}

/* Return the number of scatter-gather list elements required */
static unsigned int sg_count_fuse_req(struct fuse_req *req)
{
	struct fuse_args *args = req->args;
	struct fuse_args_pages *ap = container_of(args, typeof(*ap), args);
	unsigned int size, total_sgs = 1 /* fuse_in_header */;

	if (args->in_numargs - args->in_pages)
		total_sgs += 1;

	if (args->in_pages) {
		size = args->in_args[args->in_numargs - 1].size;
		total_sgs += sg_count_fuse_folios(ap->descs, ap->num_folios,
						  size);
	}

	if (!test_bit(FR_ISREPLY, &req->flags))
		return total_sgs;

	total_sgs += 1 /* fuse_out_header */;

	if (args->out_numargs - args->out_pages)
		total_sgs += 1;

	if (args->out_pages) {
		size = args->out_args[args->out_numargs - 1].size;
		total_sgs += sg_count_fuse_folios(ap->descs, ap->num_folios,
						  size);
	}

	return total_sgs;
}

/* Add folios to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_folios(struct scatterlist *sg,
					struct folio **folios,
					struct fuse_folio_desc *folio_descs,
					unsigned int num_folios,
					unsigned int total_len)
{
	unsigned int i;
	unsigned int this_len;

	for (i = 0; i < num_folios && total_len; i++) {
		sg_init_table(&sg[i], 1);
		this_len = min(folio_descs[i].length, total_len);
		sg_set_folio(&sg[i], folios[i], this_len, folio_descs[i].offset);
		total_len -= this_len;
	}

	return i;
}

/* Add args to scatter-gather list and return number of elements used */
static unsigned int sg_init_fuse_args(struct scatterlist *sg,
				      struct fuse_req *req,
				      struct fuse_arg *args,
				      unsigned int numargs,
				      bool argpages,
				      void *argbuf,
				      unsigned int *len_used)
{
	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
	unsigned int total_sgs = 0;
	unsigned int len;

	len = fuse_len_args(numargs - argpages, args);
	if (len)
		sg_init_one(&sg[total_sgs++], argbuf, len);

	if (argpages)
		total_sgs += sg_init_fuse_folios(&sg[total_sgs],
						 ap->folios, ap->descs,
						 ap->num_folios,
						 args[numargs - 1].size);

	if (len_used)
		*len_used = len;

	return total_sgs;
}

/* Add a request to a virtqueue and kick the device */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
				 struct fuse_req *req, bool in_flight,
				 gfp_t gfp)
{
	/* requests need at least 4 elements */
	struct scatterlist *stack_sgs[6];
	struct scatterlist stack_sg[ARRAY_SIZE(stack_sgs)];
	struct scatterlist **sgs = stack_sgs;
	struct scatterlist *sg = stack_sg;
	struct virtqueue *vq;
	struct fuse_args *args = req->args;
	unsigned int argbuf_used = 0;
	unsigned int out_sgs = 0;
	unsigned int in_sgs = 0;
	unsigned int total_sgs;
	unsigned int i, hash;
	int ret;
	bool notify;
	struct fuse_pqueue *fpq;

	/* Does the sglist fit on the stack? */
	total_sgs = sg_count_fuse_req(req);
	if (total_sgs > ARRAY_SIZE(stack_sgs)) {
		sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), gfp);
		sg = kmalloc_array(total_sgs, sizeof(sg[0]), gfp);
		if (!sgs || !sg) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Use a bounce buffer since stack args cannot be mapped */
	ret = copy_args_to_argbuf(req, gfp);
	if (ret < 0)
		goto out;

	/* Request elements */
	sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h));
	out_sgs += sg_init_fuse_args(&sg[out_sgs], req,
				     (struct fuse_arg *)args->in_args,
				     args->in_numargs, args->in_pages,
				     req->argbuf, &argbuf_used);

	/* Reply elements */
	if (test_bit(FR_ISREPLY, &req->flags)) {
		sg_init_one(&sg[out_sgs + in_sgs++],
			    &req->out.h, sizeof(req->out.h));
		in_sgs += sg_init_fuse_args(&sg[out_sgs + in_sgs], req,
					    args->out_args, args->out_numargs,
					    args->out_pages,
					    req->argbuf + argbuf_used, NULL);
	}

	WARN_ON(out_sgs + in_sgs != total_sgs);

	for (i = 0; i < total_sgs; i++)
		sgs[i] = &sg[i];

	spin_lock(&fsvq->lock);

	if (!fsvq->connected) {
		spin_unlock(&fsvq->lock);
		ret = -ENOTCONN;
		goto out;
	}

	vq = fsvq->vq;
	ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock(&fsvq->lock);
		goto out;
	}

	/* Request successfully sent. */
	fpq = &fsvq->fud->pq;
	hash = fuse_req_hash(req->in.h.unique);
	spin_lock(&fpq->lock);
	list_add_tail(&req->list, &fpq->processing[hash]);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();

	if (!in_flight)
		inc_in_flight_req(fsvq);
	notify = virtqueue_kick_prepare(vq);

	spin_unlock(&fsvq->lock);

	if (notify)
		virtqueue_notify(vq);

out:
	if (ret < 0 && req->argbuf) {
		kfree(req->argbuf);
		req->argbuf = NULL;
	}
	if (sgs != stack_sgs) {
		kfree(sgs);
		kfree(sg);
	}

	return ret;
}

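/* fuse_iqueue_ops.send_req: enqueue the request on the queue mapped to the
 * current CPU; fall back to the dispatch worker when the virtqueue is full or
 * the request cannot be ended in this context.
 */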
static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	unsigned int queue_id;
	struct virtio_fs *fs;
	struct virtio_fs_vq *fsvq;
	int ret;

	fuse_request_assign_unique(fiq, req);

	clear_bit(FR_PENDING, &req->flags);

	fs = fiq->priv;
	queue_id = fs->mq_map[raw_smp_processor_id()];

	pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u queue_id %u\n",
		 __func__, req->in.h.opcode, req->in.h.unique,
		 req->in.h.nodeid, req->in.h.len,
		 fuse_len_args(req->args->out_numargs, req->args->out_args),
		 queue_id);

	fsvq = &fs->vqs[queue_id];
	ret = virtio_fs_enqueue_req(fsvq, req, false, GFP_ATOMIC);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			/*
			 * Virtqueue full. Retry submission from worker
			 * context as we might be holding fc->bg_lock.
			 */
			spin_lock(&fsvq->lock);
			list_add_tail(&req->list, &fsvq->queued_reqs);
			inc_in_flight_req(fsvq);
			spin_unlock(&fsvq->lock);
			return;
		}
		req->out.h.error = ret;
		pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);

		/* Can't end request in submission context. Use a worker */
		spin_lock(&fsvq->lock);
		list_add_tail(&req->list, &fsvq->end_reqs);
		schedule_work(&fsvq->dispatch_work);
		spin_unlock(&fsvq->lock);
		return;
	}
}

static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
	.send_forget = virtio_fs_send_forget,
	.send_interrupt = virtio_fs_send_interrupt,
	.send_req = virtio_fs_send_req,
	.release = virtio_fs_fiq_release,
};

static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
{
	ctx->rootmode = S_IFDIR;
	ctx->default_permissions = 1;
	ctx->allow_other = 1;
	ctx->max_read = UINT_MAX;
	ctx->blksize = 512;
	ctx->destroy = true;
	ctx->no_control = true;
	ctx->no_force_umount = true;
}

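/* Fill the superblock: allocate and install one fuse_dev per virtqueue, wire
 * up DAX if requested, then send FUSE_INIT.
 */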
static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	struct fuse_conn *fc = fm->fc;
	struct virtio_fs *fs = fc->iq.priv;
	struct fuse_fs_context *ctx = fsc->fs_private;
	unsigned int i;
	int err;

	virtio_fs_ctx_set_defaults(ctx);
	mutex_lock(&virtio_fs_mutex);

	/* After taking the mutex, make sure the virtiofs device is still
	 * there. Though we are holding a reference to it, the driver's
	 * ->remove might still have cleaned up the virtual queues. In that
	 * case bail out.
	 */
	err = -EINVAL;
	if (list_empty(&fs->list)) {
		pr_info("virtio-fs: tag <%s> not found\n", fs->tag);
		goto err;
	}

	err = -ENOMEM;
	/* Allocate a fuse_dev for each virtqueue (hiprio and request queues) */
	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		fsvq->fud = fuse_dev_alloc();
		if (!fsvq->fud)
			goto err_free_fuse_devs;
	}

	/* virtiofs allocates and installs its own fuse devices */
	ctx->fudptr = NULL;
	if (ctx->dax_mode != FUSE_DAX_NEVER) {
		if (ctx->dax_mode == FUSE_DAX_ALWAYS && !fs->dax_dev) {
			err = -EINVAL;
			pr_err("virtio-fs: dax can't be enabled as filesystem"
			       " device does not support it.\n");
			goto err_free_fuse_devs;
		}
		ctx->dax_dev = fs->dax_dev;
	}
	err = fuse_fill_super_common(sb, ctx);
	if (err < 0)
		goto err_free_fuse_devs;

	for (i = 0; i < fs->nvqs; i++) {
		struct virtio_fs_vq *fsvq = &fs->vqs[i];

		fuse_dev_install(fsvq->fud, fc);
	}

	/* A previous unmount will have stopped all queues. Start them again */
	virtio_fs_start_all_queues(fs);
	fuse_send_init(fm);
	mutex_unlock(&virtio_fs_mutex);
	return 0;

err_free_fuse_devs:
	virtio_fs_free_devs(fs);
err:
	mutex_unlock(&virtio_fs_mutex);
	return err;
}

static void virtio_fs_conn_destroy(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;
	struct virtio_fs *vfs = fc->iq.priv;
	struct virtio_fs_vq *fsvq = &vfs->vqs[VQ_HIPRIO];

	/* Stop dax worker. Soon evict_inodes() will be called which
	 * will free all memory ranges belonging to all inodes.
	 */
	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_cancel_work(fc);

	/* Stop the forget queue. The destroy request will be sent soon. */
	spin_lock(&fsvq->lock);
	fsvq->connected = false;
	spin_unlock(&fsvq->lock);
	virtio_fs_drain_all_queues(vfs);

	fuse_conn_destroy(fm);

	/* fuse_conn_destroy() must have sent destroy. Stop all queues,
	 * drain one more time and free the fuse devices. Freeing the fuse
	 * devices will drop their reference on fuse_conn and that in
	 * turn will drop its reference on the virtio_fs object.
	 */
	virtio_fs_stop_all_queues(vfs);
	virtio_fs_drain_all_queues(vfs);
	virtio_fs_free_devs(vfs);
}

static void virtio_kill_sb(struct super_block *sb)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);
	bool last;

	/* If mount failed, we can still be called without any fc */
	if (sb->s_root) {
		last = fuse_mount_remove(fm);
		if (last)
			virtio_fs_conn_destroy(fm);
	}
	kill_anon_super(sb);
	fuse_mount_destroy(fm);
}

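/* Superblock matcher: two mounts share a superblock iff they refer to the
 * same virtio_fs instance.
 */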
static int virtio_fs_test_super(struct super_block *sb,
				struct fs_context *fsc)
{
	struct fuse_mount *fsc_fm = fsc->s_fs_info;
	struct fuse_mount *sb_fm = get_fuse_mount_super(sb);

	return fsc_fm->fc->iq.priv == sb_fm->fc->iq.priv;
}

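/* Look up the virtio_fs instance by tag, create or reuse the superblock and
 * fuse_conn, and cap the connection's max_pages_limit so a single request
 * fits in the virtqueue.
 */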
static int virtio_fs_get_tree(struct fs_context *fsc)
{
	struct virtio_fs *fs;
	struct super_block *sb;
	struct fuse_conn *fc = NULL;
	struct fuse_mount *fm;
	unsigned int virtqueue_size;
	int err = -EIO;

	if (!fsc->source)
		return invalf(fsc, "No source specified");

	/* This gets a reference on the virtio_fs object. The pointer gets
	 * installed in fc->iq.priv. Once the fuse_conn goes away, it calls
	 * ->release() to drop the reference to this object.
	 */
	fs = virtio_fs_find_instance(fsc->source);
	if (!fs) {
		pr_info("virtio-fs: tag <%s> not found\n", fsc->source);
		return -EINVAL;
	}

	virtqueue_size = virtqueue_get_vring_size(fs->vqs[VQ_REQUEST].vq);
	if (WARN_ON(virtqueue_size <= FUSE_HEADER_OVERHEAD))
		goto out_err;

	err = -ENOMEM;
	fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL);
	if (!fc)
		goto out_err;

	fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
	if (!fm)
		goto out_err;

	fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
	fc->release = fuse_free_conn;
	fc->delete_stale = true;
	fc->auto_submounts = true;
	fc->sync_fs = true;
	fc->use_pages_for_kvec_io = true;

	/* Tell FUSE to split requests that exceed the virtqueue's size */
	fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit,
				    virtqueue_size - FUSE_HEADER_OVERHEAD);

	fsc->s_fs_info = fm;
	sb = sget_fc(fsc, virtio_fs_test_super, set_anon_super_fc);
	if (fsc->s_fs_info)
		fuse_mount_destroy(fm);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = virtio_fs_fill_super(sb, fsc);
		if (err) {
			deactivate_locked_super(sb);
			return err;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	WARN_ON(fsc->root);
	fsc->root = dget(sb->s_root);
	return 0;

out_err:
	kfree(fc);
	virtio_fs_put(fs);
	return err;
}

static const struct fs_context_operations virtio_fs_context_ops = {
	.free = virtio_fs_free_fsc,
	.parse_param = virtio_fs_parse_param,
	.get_tree = virtio_fs_get_tree,
};

static int virtio_fs_init_fs_context(struct fs_context *fsc)
{
	struct fuse_fs_context *ctx;

	if (fsc->purpose == FS_CONTEXT_FOR_SUBMOUNT)
		return fuse_init_fs_context_submount(fsc);

	ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	fsc->fs_private = ctx;
	fsc->ops = &virtio_fs_context_ops;
	return 0;
}

static struct file_system_type virtio_fs_type = {
	.owner = THIS_MODULE,
	.name = "virtiofs",
	.init_fs_context = virtio_fs_init_fs_context,
	.kill_sb = virtio_kill_sb,
	.fs_flags = FS_ALLOW_IDMAP,
};

static int virtio_fs_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
	const struct virtio_fs *fs = container_of(kobj, struct virtio_fs, kobj);

	add_uevent_var(env, "TAG=%s", fs->tag);
	return 0;
}

static const struct kset_uevent_ops virtio_fs_uevent_ops = {
	.uevent = virtio_fs_uevent,
};

static int __init virtio_fs_sysfs_init(void)
{
	virtio_fs_kset = kset_create_and_add("virtiofs", &virtio_fs_uevent_ops,
					     fs_kobj);
	if (!virtio_fs_kset)
		return -ENOMEM;
	return 0;
}

static void virtio_fs_sysfs_exit(void)
{
	kset_unregister(virtio_fs_kset);
	virtio_fs_kset = NULL;
}

static int __init virtio_fs_init(void)
{
	int ret;

	ret = virtio_fs_sysfs_init();
	if (ret < 0)
		return ret;

	ret = register_virtio_driver(&virtio_fs_driver);
	if (ret < 0)
		goto sysfs_exit;

	ret = register_filesystem(&virtio_fs_type);
	if (ret < 0)
		goto unregister_virtio_driver;

	return 0;

unregister_virtio_driver:
	unregister_virtio_driver(&virtio_fs_driver);
sysfs_exit:
	virtio_fs_sysfs_exit();
	return ret;
}
module_init(virtio_fs_init);

static void __exit virtio_fs_exit(void)
{
	unregister_filesystem(&virtio_fs_type);
	unregister_virtio_driver(&virtio_fs_driver);
	virtio_fs_sysfs_exit();
}
module_exit(virtio_fs_exit);

MODULE_AUTHOR("Stefan Hajnoczi <stefanha@redhat.com>");
MODULE_DESCRIPTION("Virtio Filesystem");
MODULE_LICENSE("GPL");
MODULE_ALIAS_FS(KBUILD_MODNAME);
MODULE_DEVICE_TABLE(virtio, id_table);