// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/9p/trans_xen
 *
 * Xen transport layer.
 *
 * Copyright (C) 2017 by Stefano Stabellini <stefano@aporeto.com>
 */

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/9pfs.h>

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs_context.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

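/*
 * Each 9pfs connection uses XEN_9PFS_NUM_RINGS shared rings.  A ring is
 * 2^ring_order pages split evenly into an "in" and an "out" FIFO of
 * XEN_FLEX_RING_SIZE(order) bytes each; with the default order of 9 and
 * 4 KiB Xen pages that is 1 MiB per direction.  XEN_9PFS_RING_SIZE()
 * takes the order actually negotiated with the backend from the shared
 * intf page.
 */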
#define XEN_9PFS_NUM_RINGS 2
#define XEN_9PFS_RING_ORDER 9
#define XEN_9PFS_RING_SIZE(ring) XEN_FLEX_RING_SIZE(ring->intf->ring_order)

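/*
 * Fixed-size prefix of every 9P message as it appears on the ring:
 * size[4] type[1] tag[2].  The response worker reads this header first
 * to learn the message length and tag before copying the full packet
 * into the matching request's receive buffer.
 */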
struct xen_9pfs_header {
	uint32_t size;
	uint8_t id;
	uint16_t tag;

	/* uint8_t sdata[]; */
} __attribute__((packed));

/* One per ring, more than one per 9pfs share */
struct xen_9pfs_dataring {
	struct xen_9pfs_front_priv *priv;

	struct xen_9pfs_data_intf *intf;
	grant_ref_t ref;
	int evtchn;
	int irq;
	/* protect a ring from concurrent accesses */
	spinlock_t lock;

	struct xen_9pfs_data data;
	wait_queue_head_t wq;
	struct work_struct work;
};

/* One per 9pfs share */
struct xen_9pfs_front_priv {
	struct list_head list;
	struct xenbus_device *dev;
	char *tag;
	struct p9_client *client;

	struct xen_9pfs_dataring *rings;
};

static LIST_HEAD(xen_9pfs_devs);
static DEFINE_RWLOCK(xen_9pfs_lock);

/* We don't currently allow canceling of requests */
static int p9_xen_cancel(struct p9_client *client, struct p9_req_t *req)
{
	return 1;
}

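/*
 * Bind a 9P client to a frontend device: the mount source must match the
 * "tag" read from xenstore for one of the registered devices.
 */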
static int p9_xen_create(struct p9_client *client, struct fs_context *fc)
{
	const char *addr = fc->source;
	struct xen_9pfs_front_priv *priv;

	if (addr == NULL)
		return -EINVAL;

	read_lock(&xen_9pfs_lock);
	list_for_each_entry(priv, &xen_9pfs_devs, list) {
		if (!strcmp(priv->tag, addr)) {
			priv->client = client;
			read_unlock(&xen_9pfs_lock);
			return 0;
		}
	}
	read_unlock(&xen_9pfs_lock);
	return -EINVAL;
}

static void p9_xen_close(struct p9_client *client)
{
	struct xen_9pfs_front_priv *priv;

	read_lock(&xen_9pfs_lock);
	list_for_each_entry(priv, &xen_9pfs_devs, list) {
		if (priv->client == client) {
			priv->client = NULL;
			read_unlock(&xen_9pfs_lock);
			return;
		}
	}
	read_unlock(&xen_9pfs_lock);
}

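/*
 * True when the out ring has at least 'size' bytes of free space.  This is
 * only a hint for the wait queue; p9_xen_request() re-checks the indices
 * under the ring lock before writing.
 */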
static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
{
	RING_IDX cons, prod;

	cons = ring->intf->out_cons;
	prod = ring->intf->out_prod;
	virt_mb();

	return XEN_9PFS_RING_SIZE(ring) -
		xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) >= size;
}

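/*
 * Transmit path: requests are spread across the rings by tag, the TC
 * buffer is copied into the out ring, and the producer index is only
 * published after a write barrier so the backend never sees the index
 * ahead of the data.  The backend is then notified through the ring's
 * event channel.
 */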
static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
{
	struct xen_9pfs_front_priv *priv;
	RING_IDX cons, prod, masked_cons, masked_prod;
	unsigned long flags;
	u32 size = p9_req->tc.size;
	struct xen_9pfs_dataring *ring;
	int num;

	read_lock(&xen_9pfs_lock);
	list_for_each_entry(priv, &xen_9pfs_devs, list) {
		if (priv->client == client)
			break;
	}
	read_unlock(&xen_9pfs_lock);
	if (list_entry_is_head(priv, &xen_9pfs_devs, list))
		return -EINVAL;

	num = p9_req->tc.tag % XEN_9PFS_NUM_RINGS;
	ring = &priv->rings[num];

again:
	while (wait_event_killable(ring->wq,
				   p9_xen_write_todo(ring, size)) != 0)
		;

	spin_lock_irqsave(&ring->lock, flags);
	cons = ring->intf->out_cons;
	prod = ring->intf->out_prod;
	virt_mb();

	if (XEN_9PFS_RING_SIZE(ring) -
	    xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) < size) {
		spin_unlock_irqrestore(&ring->lock, flags);
		goto again;
	}

	masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring));
	masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));

	xen_9pfs_write_packet(ring->data.out, p9_req->tc.sdata, size,
			      &masked_prod, masked_cons,
			      XEN_9PFS_RING_SIZE(ring));

	WRITE_ONCE(p9_req->status, REQ_STATUS_SENT);
	virt_wmb();			/* write ring before updating pointer */
	prod += size;
	ring->intf->out_prod = prod;
	spin_unlock_irqrestore(&ring->lock, flags);
	notify_remote_via_irq(ring->irq);
	p9_req_put(client, p9_req);

	return 0;
}

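/*
 * Receive path, run from the per-ring work item: peek at the 9P header to
 * find the message length and tag, look the request up, copy the whole
 * packet into its receive buffer, advance the in-ring consumer index and
 * complete the request via p9_client_cb().  Messages with an unknown tag
 * or a size exceeding the receive buffer are consumed and dropped (the
 * latter marks the request as failed).
 */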
static void p9_xen_response(struct work_struct *work)
{
	struct xen_9pfs_front_priv *priv;
	struct xen_9pfs_dataring *ring;
	RING_IDX cons, prod, masked_cons, masked_prod;
	struct xen_9pfs_header h;
	struct p9_req_t *req;
	int status;

	ring = container_of(work, struct xen_9pfs_dataring, work);
	priv = ring->priv;

	while (1) {
		cons = ring->intf->in_cons;
		prod = ring->intf->in_prod;
		virt_rmb();

		if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) <
		    sizeof(h)) {
			notify_remote_via_irq(ring->irq);
			return;
		}

		masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring));
		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));

		/* First, read just the header */
		xen_9pfs_read_packet(&h, ring->data.in, sizeof(h),
				     masked_prod, &masked_cons,
				     XEN_9PFS_RING_SIZE(ring));

		req = p9_tag_lookup(priv->client, h.tag);
		if (!req || req->status != REQ_STATUS_SENT) {
			dev_warn(&priv->dev->dev, "Wrong req tag=%x\n", h.tag);
			cons += h.size;
			virt_mb();
			ring->intf->in_cons = cons;
			continue;
		}

		if (h.size > req->rc.capacity) {
			dev_warn(&priv->dev->dev,
				 "requested packet size too big: %d for tag %d with capacity %zd\n",
				 h.size, h.tag, req->rc.capacity);
			WRITE_ONCE(req->status, REQ_STATUS_ERROR);
			goto recv_error;
		}

		req->rc.size = h.size;
		req->rc.id = h.id;
		req->rc.tag = h.tag;
		req->rc.offset = 0;

		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
		/* Then, read the whole packet (including the header) */
		xen_9pfs_read_packet(req->rc.sdata, ring->data.in, h.size,
				     masked_prod, &masked_cons,
				     XEN_9PFS_RING_SIZE(ring));

recv_error:
		virt_mb();
		cons += h.size;
		ring->intf->in_cons = cons;

		status = (req->status != REQ_STATUS_ERROR) ?
			REQ_STATUS_RCVD : REQ_STATUS_ERROR;

		p9_client_cb(priv->client, req, status);
	}
}

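/*
 * Event channel upcall: wake up any writer waiting for out-ring space and
 * kick the work item that drains the in ring.
 */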
static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r)
{
	struct xen_9pfs_dataring *ring = r;

	if (!ring || !ring->priv->client) {
		/* ignore spurious interrupt */
		return IRQ_HANDLED;
	}

	wake_up_interruptible(&ring->wq);
	schedule_work(&ring->work);

	return IRQ_HANDLED;
}

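/*
 * maxsize caps a 9P message at half of one direction's ring: with the
 * default ring order of 9 and 4 KiB Xen pages that is 512 KiB, so two
 * maximum-sized messages fit in the 1 MiB out ring.
 */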
static struct p9_trans_module p9_xen_trans = {
	.name = "xen",
	.maxsize = 1 << (XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT - 2),
	.pooled_rbuffers = false,
	.def = true,
	.supports_vmalloc = false,
	.create = p9_xen_create,
	.close = p9_xen_close,
	.request = p9_xen_request,
	.cancel = p9_xen_cancel,
	.owner = THIS_MODULE,
};

static const struct xenbus_device_id xen_9pfs_front_ids[] = {
	{ "9pfs" },
	{ "" }
};

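/*
 * Tear down a frontend: flush the response work, unbind the interrupt,
 * revoke the grants on the data pages and on the intf page itself, then
 * free everything.
 */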
static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
{
	int i, j;

	if (priv->rings) {
		for (i = 0; i < XEN_9PFS_NUM_RINGS; i++) {
			struct xen_9pfs_dataring *ring = &priv->rings[i];

			cancel_work_sync(&ring->work);

			if (!priv->rings[i].intf)
				break;
			if (priv->rings[i].irq > 0)
				unbind_from_irqhandler(priv->rings[i].irq, ring);
			if (priv->rings[i].data.in) {
				for (j = 0;
				     j < (1 << priv->rings[i].intf->ring_order);
				     j++) {
					grant_ref_t ref;

					ref = priv->rings[i].intf->ref[j];
					gnttab_end_foreign_access(ref, NULL);
				}
				free_pages_exact(priv->rings[i].data.in,
						 1UL << (priv->rings[i].intf->ring_order +
							 XEN_PAGE_SHIFT));
			}
			gnttab_end_foreign_access(priv->rings[i].ref, NULL);
			free_page((unsigned long)priv->rings[i].intf);
		}
		kfree(priv->rings);
	}
	kfree(priv->tag);
	kfree(priv);
}

static void xen_9pfs_front_remove(struct xenbus_device *dev)
{
	struct xen_9pfs_front_priv *priv;

	write_lock(&xen_9pfs_lock);
	priv = dev_get_drvdata(&dev->dev);
	if (priv == NULL) {
		write_unlock(&xen_9pfs_lock);
		return;
	}
	dev_set_drvdata(&dev->dev, NULL);
	list_del(&priv->list);
	write_unlock(&xen_9pfs_lock);

	xen_9pfs_front_free(priv);
}

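/*
 * Set up one data ring: allocate and grant the shared intf page, allocate
 * and grant 2^order data pages (recording their grant references in the
 * intf page), then allocate an event channel and bind it to the frontend's
 * interrupt handler.  On failure every grant and allocation made so far is
 * rolled back.
 */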
static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
					 struct xen_9pfs_dataring *ring,
					 unsigned int order)
{
	int i = 0;
	int ret = -ENOMEM;
	void *bytes = NULL;

	init_waitqueue_head(&ring->wq);
	spin_lock_init(&ring->lock);
	INIT_WORK(&ring->work, p9_xen_response);

	ring->intf = (struct xen_9pfs_data_intf *)get_zeroed_page(GFP_KERNEL);
	if (!ring->intf)
		return ret;
	ret = gnttab_grant_foreign_access(dev->otherend_id,
					  virt_to_gfn(ring->intf), 0);
	if (ret < 0)
		goto out;
	ring->ref = ret;
	bytes = alloc_pages_exact(1UL << (order + XEN_PAGE_SHIFT),
				  GFP_KERNEL | __GFP_ZERO);
	if (!bytes) {
		ret = -ENOMEM;
		goto out;
	}
	for (; i < (1 << order); i++) {
		ret = gnttab_grant_foreign_access(
				dev->otherend_id, virt_to_gfn(bytes) + i, 0);
		if (ret < 0)
			goto out;
		ring->intf->ref[i] = ret;
	}
	ring->intf->ring_order = order;
	ring->data.in = bytes;
	ring->data.out = bytes + XEN_FLEX_RING_SIZE(order);

	ret = xenbus_alloc_evtchn(dev, &ring->evtchn);
	if (ret)
		goto out;
	ring->irq = bind_evtchn_to_irqhandler(ring->evtchn,
					      xen_9pfs_front_event_handler,
					      0, "xen_9pfs-frontend", ring);
	if (ring->irq >= 0)
		return 0;

	xenbus_free_evtchn(dev, ring->evtchn);
	ret = ring->irq;
out:
	if (bytes) {
		for (i--; i >= 0; i--)
			gnttab_end_foreign_access(ring->intf->ref[i], NULL);
		free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT));
	}
	gnttab_end_foreign_access(ring->ref, NULL);
	free_page((unsigned long)ring->intf);
	return ret;
}

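/*
 * Negotiate with the backend through xenstore: require protocol version 1
 * and at least XEN_9PFS_NUM_RINGS rings, clamp the ring order to what the
 * backend supports, allocate the rings, then publish version, ring count,
 * ring-refs and event channels in a single xenbus transaction before
 * switching the device to Initialised.
 */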
static int xen_9pfs_front_init(struct xenbus_device *dev)
{
	int ret, i;
	struct xenbus_transaction xbt;
	struct xen_9pfs_front_priv *priv;
	char *versions, *v;
	unsigned int max_rings, max_ring_order, len = 0;

	versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
	if (IS_ERR(versions))
		return PTR_ERR(versions);
	for (v = versions; *v; v++) {
		if (simple_strtoul(v, &v, 10) == 1) {
			v = NULL;
			break;
		}
	}
	if (v) {
		kfree(versions);
		return -EINVAL;
	}
	kfree(versions);
	max_rings = xenbus_read_unsigned(dev->otherend, "max-rings", 0);
	if (max_rings < XEN_9PFS_NUM_RINGS)
		return -EINVAL;
	max_ring_order = xenbus_read_unsigned(dev->otherend,
					      "max-ring-page-order", 0);
	if (max_ring_order > XEN_9PFS_RING_ORDER)
		max_ring_order = XEN_9PFS_RING_ORDER;
	if (p9_xen_trans.maxsize > XEN_FLEX_RING_SIZE(max_ring_order))
		p9_xen_trans.maxsize = XEN_FLEX_RING_SIZE(max_ring_order) / 2;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	priv->dev = dev;
	priv->rings = kcalloc(XEN_9PFS_NUM_RINGS, sizeof(*priv->rings),
			      GFP_KERNEL);
	if (!priv->rings) {
		kfree(priv);
		return -ENOMEM;
	}

	for (i = 0; i < XEN_9PFS_NUM_RINGS; i++) {
		priv->rings[i].priv = priv;
		ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i],
						    max_ring_order);
		if (ret < 0)
			goto error;
	}

again:
	ret = xenbus_transaction_start(&xbt);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "starting transaction");
		goto error;
	}
	ret = xenbus_printf(xbt, dev->nodename, "version", "%u", 1);
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "num-rings", "%u",
			    XEN_9PFS_NUM_RINGS);
	if (ret)
		goto error_xenbus;

	for (i = 0; i < XEN_9PFS_NUM_RINGS; i++) {
		char str[16];

		BUILD_BUG_ON(XEN_9PFS_NUM_RINGS > 9);
		sprintf(str, "ring-ref%d", i);
		ret = xenbus_printf(xbt, dev->nodename, str, "%d",
				    priv->rings[i].ref);
		if (ret)
			goto error_xenbus;

		sprintf(str, "event-channel-%d", i);
		ret = xenbus_printf(xbt, dev->nodename, str, "%u",
				    priv->rings[i].evtchn);
		if (ret)
			goto error_xenbus;
	}
	priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL);
	if (IS_ERR(priv->tag)) {
		ret = PTR_ERR(priv->tag);
		goto error_xenbus;
	}
	ret = xenbus_transaction_end(xbt, 0);
	if (ret) {
		if (ret == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, ret, "completing transaction");
		goto error;
	}

	write_lock(&xen_9pfs_lock);
	dev_set_drvdata(&dev->dev, priv);
	list_add_tail(&priv->list, &xen_9pfs_devs);
	write_unlock(&xen_9pfs_lock);

	xenbus_switch_state(dev, XenbusStateInitialised);
	return 0;

error_xenbus:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, ret, "writing xenstore");
error:
	xen_9pfs_front_free(priv);
	return ret;
}

static int xen_9pfs_front_probe(struct xenbus_device *dev,
				const struct xenbus_device_id *id)
{
	return 0;
}

static int xen_9pfs_front_resume(struct xenbus_device *dev)
{
	dev_warn(&dev->dev, "suspend/resume unsupported\n");
	return 0;
}

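/*
 * Xenbus state machine: the real setup work happens when the backend
 * reaches InitWait, and we only switch ourselves to Connected once the
 * backend does.  Closing/Closed from the backend moves the frontend to
 * Closed via xenbus_frontend_closed().
 */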
static void xen_9pfs_front_changed(struct xenbus_device *dev,
				   enum xenbus_state backend_state)
{
	switch (backend_state) {
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;

		xen_9pfs_front_init(dev);
		break;

	case XenbusStateConnected:
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		fallthrough;	/* Missed the backend's CLOSING state */
	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}

static struct xenbus_driver xen_9pfs_front_driver = {
	.ids = xen_9pfs_front_ids,
	.probe = xen_9pfs_front_probe,
	.remove = xen_9pfs_front_remove,
	.resume = xen_9pfs_front_resume,
	.otherend_changed = xen_9pfs_front_changed,
};

static int __init p9_trans_xen_init(void)
{
	int rc;

	if (!xen_domain())
		return -ENODEV;

	pr_info("Initialising Xen transport for 9pfs\n");

	v9fs_register_trans(&p9_xen_trans);
	rc = xenbus_register_frontend(&xen_9pfs_front_driver);
	if (rc)
		v9fs_unregister_trans(&p9_xen_trans);

	return rc;
}
module_init(p9_trans_xen_init);
MODULE_ALIAS_9P("xen");

static void __exit p9_trans_xen_exit(void)
{
	v9fs_unregister_trans(&p9_xen_trans);
	return xenbus_unregister_driver(&xen_9pfs_front_driver);
}
module_exit(p9_trans_xen_exit);

MODULE_ALIAS("xen:9pfs");
MODULE_AUTHOR("Stefano Stabellini <stefano@aporeto.com>");
MODULE_DESCRIPTION("Xen Transport for 9P");
MODULE_LICENSE("GPL");