xref: /freebsd/sys/dev/virtio/p9fs/virtio_p9fs.c (revision 9cbf1de7e34a6fced041388fad5d9180cb7705fe)
1 /*-
2  * Copyright (c) 2017 Juniper Networks, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *	notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *	notice, this list of conditions and the following disclaimer in the
12  *	documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  *
25  */
26 /*
27  * The Virtio 9P transport driver. This file contains all functions related to
28  * the virtqueue infrastructure which include creating the virtqueue, host
29  * interactions, interrupts etc.
30  */
31 
32 #include <sys/param.h>
33 #include <sys/errno.h>
34 #include <sys/module.h>
35 #include <sys/sglist.h>
36 #include <sys/queue.h>
37 #include <sys/bus.h>
38 #include <sys/kthread.h>
39 #include <sys/condvar.h>
40 #include <sys/sysctl.h>
41 
42 #include <machine/bus.h>
43 
44 #include <fs/p9fs/p9_client.h>
45 #include <fs/p9fs/p9_debug.h>
46 #include <fs/p9fs/p9_protocol.h>
47 #include <fs/p9fs/p9_transport.h>
48 
49 #include <dev/virtio/virtio.h>
50 #include <dev/virtio/virtqueue.h>
51 #include <dev/virtio/virtio_ring.h>
52 #include <dev/virtio/p9fs/virtio_p9fs.h>
53 
/* Convenience wrappers around the per-channel mutex. */
#define VT9P_MTX(_sc) (&(_sc)->vt9p_mtx)
#define VT9P_LOCK(_sc) mtx_lock(VT9P_MTX(_sc))
#define VT9P_UNLOCK(_sc) mtx_unlock(VT9P_MTX(_sc))
#define VT9P_LOCK_INIT(_sc) mtx_init(VT9P_MTX(_sc), \
    "VIRTIO 9P CHAN lock", NULL, MTX_DEF)
#define VT9P_LOCK_DESTROY(_sc) mtx_destroy(VT9P_MTX(_sc))
/* Upper bound on scatter/gather segments per request (tc + rc buffers). */
#define MAX_SUPPORTED_SGS 20
static MALLOC_DEFINE(M_P9FS_MNTTAG, "p9fs_mount_tag", "P9fs Mounttag");
62 
/* Per-device (per virtio 9P channel) software context. */
struct vt9p_softc {
	device_t vt9p_dev;		/* backing virtio device */
	struct mtx vt9p_mtx;		/* serializes virtqueue and sglist use */
	struct sglist *vt9p_sglist;	/* scratch sg list reused per request */
	struct cv submit_cv;		/* waiters for free virtqueue slots */
	bool busy;			/* set while a client owns this channel */
	struct virtqueue *vt9p_vq;	/* single request virtqueue */
	int max_nsegs;			/* capacity of vt9p_sglist */
	uint16_t mount_tag_len;		/* mount_tag length incl. NUL */
	char *mount_tag;		/* host-provided tag (M_P9FS_MNTTAG) */
	STAILQ_ENTRY(vt9p_softc) chan_next;	/* global_chan_list linkage */
};
75 
/* Global channel list; each channel will correspond to a mount point */
static STAILQ_HEAD( ,vt9p_softc) global_chan_list;
/* Protects global_chan_list; set up by global_chan_list_init() at SYSINIT. */
struct mtx global_chan_list_mtx;

/* Feature name table handed to the virtio core for pretty-printing. */
static struct virtio_feature_desc virtio_9p_feature_desc[] = {
	{ VIRTIO_9PNET_F_MOUNT_TAG,	"9PMountTag" },
	{ 0, NULL }
};
84 
85 static void
86 global_chan_list_init(void)
87 {
88 
89 	mtx_init(&global_chan_list_mtx, "9pglobal",
90 	    NULL, MTX_DEF);
91 	STAILQ_INIT(&global_chan_list);
92 }
93 SYSINIT(global_chan_list_init, SI_SUB_KLD, SI_ORDER_FIRST,
94     global_chan_list_init, NULL);
95 
/*
 * We don't currently allow canceling of virtio requests.  Returning
 * non-zero tells the p9fs client layer that cancellation was refused.
 */
static int
vt9p_cancel(void *handle, struct p9_req_t *req)
{

	return (1);
}
103 
/* Sysctl subtree: vfs.9p */
SYSCTL_NODE(_vfs, OID_AUTO, 9p, CTLFLAG_RW, 0, "9P File System Protocol");

/*
 * Maximum number of seconds the vt9p_request thread sleeps waiting for an
 * ack from the host, before giving up with EIO.  Tunable at runtime via
 * vfs.9p.ackmaxidle.
 */
static unsigned int vt9p_ackmaxidle = 120;

SYSCTL_UINT(_vfs_9p, OID_AUTO, ackmaxidle, CTLFLAG_RW, &vt9p_ackmaxidle, 0,
    "Maximum time request thread waits for ack from host");
114 
115 /*
116  * Wait for completion of a p9 request.
117  *
118  * This routine will sleep and release the chan mtx during the period.
119  * chan mtx will be acquired again upon return.
120  */
121 static int
122 vt9p_req_wait(struct vt9p_softc *chan, struct p9_req_t *req)
123 {
124 	if (req->tc->tag != req->rc->tag) {
125 		if (msleep(req, VT9P_MTX(chan), 0, "chan lock",
126 		    vt9p_ackmaxidle * hz)) {
127 			/*
128 			 * Waited for 120s. No response from host.
129 			 * Can't wait for ever..
130 			 */
131 			P9_DEBUG(ERROR, "Timeout after waiting %u seconds"
132 			    "for an ack from host\n", vt9p_ackmaxidle);
133 			return (EIO);
134 		}
135 		KASSERT(req->tc->tag == req->rc->tag,
136 		    ("Spurious event on p9 req"));
137 	}
138 	return (0);
139 }
140 
141 /*
142  * Request handler. This is called for every request submitted to the host
143  * It basically maps the tc/rc buffers to sg lists and submits the requests
144  * into the virtqueue. Since we have implemented a synchronous version, the
145  * submission thread sleeps until the ack in the interrupt wakes it up. Once
146  * it wakes up, it returns back to the P9fs layer. The rc buffer is then
147  * processed and completed to its upper layers.
148  */
149 static int
150 vt9p_request(void *handle, struct p9_req_t *req)
151 {
152 	int error;
153 	struct vt9p_softc *chan;
154 	int readable, writable;
155 	struct sglist *sg;
156 	struct virtqueue *vq;
157 
158 	chan = handle;
159 	sg = chan->vt9p_sglist;
160 	vq = chan->vt9p_vq;
161 
162 	P9_DEBUG(TRANS, "%s: req=%p\n", __func__, req);
163 
164 	/* Grab the channel lock*/
165 	VT9P_LOCK(chan);
166 	sglist_reset(sg);
167 	/* Handle out VirtIO ring buffers */
168 	error = sglist_append(sg, req->tc->sdata, req->tc->size);
169 	if (error != 0) {
170 		P9_DEBUG(ERROR, "%s: sglist append failed\n", __func__);
171 		VT9P_UNLOCK(chan);
172 		return (error);
173 	}
174 	readable = sg->sg_nseg;
175 
176 	error = sglist_append(sg, req->rc->sdata, req->rc->capacity);
177 	if (error != 0) {
178 		P9_DEBUG(ERROR, "%s: sglist append failed\n", __func__);
179 		VT9P_UNLOCK(chan);
180 		return (error);
181 	}
182 	writable = sg->sg_nseg - readable;
183 
184 req_retry:
185 	error = virtqueue_enqueue(vq, req, sg, readable, writable);
186 
187 	if (error != 0) {
188 		if (error == ENOSPC) {
189 			/*
190 			 * Condvar for the submit queue. Unlock the chan
191 			 * since wakeup needs one.
192 			 */
193 			cv_wait(&chan->submit_cv, VT9P_MTX(chan));
194 			P9_DEBUG(TRANS, "%s: retry virtio request\n", __func__);
195 			goto req_retry;
196 		} else {
197 			P9_DEBUG(ERROR, "%s: virtio enuqueue failed \n", __func__);
198 			VT9P_UNLOCK(chan);
199 			return (EIO);
200 		}
201 	}
202 
203 	/* We have to notify */
204 	virtqueue_notify(vq);
205 
206 	error = vt9p_req_wait(chan, req);
207 	if (error != 0) {
208 		VT9P_UNLOCK(chan);
209 		return (error);
210 	}
211 
212 	VT9P_UNLOCK(chan);
213 
214 	P9_DEBUG(TRANS, "%s: virtio request kicked\n", __func__);
215 
216 	return (0);
217 }
218 
219 /*
220  * Completion of the request from the virtqueue. This interrupt handler is
221  * setup at initialization and is called for every completing request. It
222  * just wakes up the sleeping submission requests.
223  */
224 static void
225 vt9p_intr_complete(void *xsc)
226 {
227 	struct vt9p_softc *chan;
228 	struct virtqueue *vq;
229 	struct p9_req_t *curreq;
230 
231 	chan = (struct vt9p_softc *)xsc;
232 	vq = chan->vt9p_vq;
233 
234 	P9_DEBUG(TRANS, "%s: completing\n", __func__);
235 
236 	VT9P_LOCK(chan);
237 	while ((curreq = virtqueue_dequeue(vq, NULL)) != NULL) {
238 		curreq->rc->tag = curreq->tc->tag;
239 		wakeup_one(curreq);
240 	}
241 	virtqueue_enable_intr(vq);
242 	cv_signal(&chan->submit_cv);
243 	VT9P_UNLOCK(chan);
244 }
245 
246 /*
247  * Allocation of the virtqueue with interrupt complete routines.
248  */
249 static int
250 vt9p_alloc_virtqueue(struct vt9p_softc *sc)
251 {
252 	struct vq_alloc_info vq_info;
253 	device_t dev;
254 
255 	dev = sc->vt9p_dev;
256 
257 	VQ_ALLOC_INFO_INIT(&vq_info, sc->max_nsegs,
258 	    vt9p_intr_complete, sc, &sc->vt9p_vq,
259 	    "%s request", device_get_nameunit(dev));
260 
261 	return (virtio_alloc_virtqueues(dev, 1, &vq_info));
262 }
263 
264 /* Probe for existence of 9P virtio channels */
265 static int
266 vt9p_probe(device_t dev)
267 {
268 
269 	/* If the virtio device type is a 9P device, then we claim and attach it */
270 	if (virtio_get_device_type(dev) != VIRTIO_ID_9P)
271 		return (ENXIO);
272 	device_set_desc(dev, "VirtIO 9P Transport");
273 
274 	return (BUS_PROBE_DEFAULT);
275 }
276 
/*
 * Quiesce the channel.  Interrupts are disabled before the device is
 * stopped so the completion handler does not run against a stopping queue.
 */
static void
vt9p_stop(struct vt9p_softc *sc)
{

	/* Device specific stops. */
	virtqueue_disable_intr(sc->vt9p_vq);
	virtio_stop(sc->vt9p_dev);
}
285 
286 /* Detach the 9P virtio PCI device */
287 static int
288 vt9p_detach(device_t dev)
289 {
290 	struct vt9p_softc *sc;
291 
292 	sc = device_get_softc(dev);
293 	VT9P_LOCK(sc);
294 	vt9p_stop(sc);
295 	VT9P_UNLOCK(sc);
296 
297 	if (sc->vt9p_sglist) {
298 		sglist_free(sc->vt9p_sglist);
299 		sc->vt9p_sglist = NULL;
300 	}
301 	if (sc->mount_tag) {
302 		free(sc->mount_tag, M_P9FS_MNTTAG);
303 		sc->mount_tag = NULL;
304 	}
305 	mtx_lock(&global_chan_list_mtx);
306 	STAILQ_REMOVE(&global_chan_list, sc, vt9p_softc, chan_next);
307 	mtx_unlock(&global_chan_list_mtx);
308 
309 	VT9P_LOCK_DESTROY(sc);
310 	cv_destroy(&sc->submit_cv);
311 
312 	return (0);
313 }
314 
/* Attach the 9P virtio PCI device */
static int
vt9p_attach(device_t dev)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct vt9p_softc *chan;
	char *mount_tag;
	int error;
	uint16_t mount_tag_len;

	chan = device_get_softc(dev);
	chan->vt9p_dev = dev;

	/* Init the channel lock. */
	VT9P_LOCK_INIT(chan);
	/* Initialize the condition variable */
	cv_init(&chan->submit_cv, "Conditional variable for submit queue" );
	chan->max_nsegs = MAX_SUPPORTED_SGS;
	chan->vt9p_sglist = sglist_alloc(chan->max_nsegs, M_NOWAIT);
	if (chan->vt9p_sglist == NULL) {
		error = ENOMEM;
		P9_DEBUG(ERROR, "%s: Cannot allocate sglist\n", __func__);
		goto out;
	}

	/* Negotiate the features from the host */
	virtio_set_feature_desc(dev, virtio_9p_feature_desc);
	virtio_negotiate_features(dev, VIRTIO_9PNET_F_MOUNT_TAG);

	/*
	 * If mount tag feature is supported read the mount tag
	 * from device config.  Without the tag there is no way to match
	 * a channel to a mount point, so attach fails.
	 */
	if (virtio_with_feature(dev, VIRTIO_9PNET_F_MOUNT_TAG))
		mount_tag_len = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_9pnet_config, mount_tag_len));
	else {
		error = EINVAL;
		P9_DEBUG(ERROR, "%s: Mount tag feature not supported by host\n", __func__);
		goto out;
	}
	/* The +1 together with M_ZERO guarantees NUL termination. */
	mount_tag = malloc(mount_tag_len + 1, M_P9FS_MNTTAG,
	    M_WAITOK | M_ZERO);

	virtio_read_device_config(dev,
	    offsetof(struct virtio_9pnet_config, mount_tag),
	    mount_tag, mount_tag_len);

	device_printf(dev, "Mount tag: %s\n", mount_tag);

	/* Stored length counts the terminating NUL as well. */
	mount_tag_len++;
	chan->mount_tag_len = mount_tag_len;
	chan->mount_tag = mount_tag;

	/* Expose the tag as a read-only sysctl under this device node. */
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "p9fs_mount_tag",
	    CTLFLAG_RD, chan->mount_tag, 0, "Mount tag");

	/* We expect one virtqueue, for requests. */
	error = vt9p_alloc_virtqueue(chan);

	if (error != 0) {
		P9_DEBUG(ERROR, "%s: Allocating the virtqueue failed \n", __func__);
		goto out;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_MISC|INTR_MPSAFE);

	if (error != 0) {
		P9_DEBUG(ERROR, "%s: Cannot setup virtqueue interrupt\n", __func__);
		goto out;
	}
	error = virtqueue_enable_intr(chan->vt9p_vq);

	if (error != 0) {
		P9_DEBUG(ERROR, "%s: Cannot enable virtqueue interrupt\n", __func__);
		goto out;
	}

	mtx_lock(&global_chan_list_mtx);
	/* Insert the channel in global channel list */
	STAILQ_INSERT_HEAD(&global_chan_list, chan, chan_next);
	mtx_unlock(&global_chan_list_mtx);

	return (0);
out:
	/*
	 * Something went wrong, detach the device.  NOTE(review): on this
	 * path the channel was never inserted into global_chan_list --
	 * confirm vt9p_detach() tolerates removing a non-member element.
	 */
	vt9p_detach(dev);
	return (error);
}
407 
408 /*
409  * Allocate a new virtio channel. This sets up a transport channel
410  * for 9P communication
411  */
412 static int
413 vt9p_create(const char *mount_tag, void **handlep)
414 {
415 	struct vt9p_softc *sc, *chan;
416 
417 	chan = NULL;
418 
419 	/*
420 	 * Find out the corresponding channel for a client from global list
421 	 * of channels based on mount tag and attach it to client
422 	 */
423 	mtx_lock(&global_chan_list_mtx);
424 	STAILQ_FOREACH(sc, &global_chan_list, chan_next) {
425 		if (!strcmp(sc->mount_tag, mount_tag)) {
426 			chan = sc;
427 			break;
428 		}
429 	}
430 	mtx_unlock(&global_chan_list_mtx);
431 
432 	/*
433 	 * If chan is already attached to a client then it cannot be used for
434 	 * another client.
435 	 */
436 	if (chan && chan->busy) {
437 		//p9_debug(TRANS, "Channel busy: used by clnt=%p\n", chan->client);
438 		return (EBUSY);
439 	}
440 
441 	/* If we dont have one, for now bail out.*/
442 	if (chan) {
443 		*handlep = (void *)chan;
444 		chan->busy = TRUE;
445 	} else {
446 		P9_DEBUG(TRANS, "%s: No Global channel with mount_tag=%s\n",
447 		    __func__, mount_tag);
448 		return (EINVAL);
449 	}
450 
451 	return (0);
452 }
453 
454 static void
455 vt9p_close(void *handle)
456 {
457 	struct vt9p_softc *chan = handle;
458 	chan->busy = FALSE;
459 }
460 
/* Transport operations registered with the p9fs client layer. */
static struct p9_trans_module vt9p_trans = {
	.name = "virtio",
	.create = vt9p_create,
	.close = vt9p_close,
	.request = vt9p_request,
	.cancel = vt9p_cancel,
};
468 
static device_method_t vt9p_mthds[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,	 vt9p_probe),
	DEVMETHOD(device_attach, vt9p_attach),
	DEVMETHOD(device_detach, vt9p_detach),
	DEVMETHOD_END
};

/* newbus driver glue; one vt9p_softc is allocated per virtio 9P device. */
static driver_t vt9p_drv = {
	"virtio_p9fs",
	vt9p_mthds,
	sizeof(struct vt9p_softc)
};
482 
/*
 * Module event handler: registers this transport with the p9fs client
 * layer on load and tears down the client zones on unload.
 */
static int
vt9p_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
		/* Set up client memory zones, then advertise the transport. */
		p9_init_zones();
		p9_register_trans(&vt9p_trans);
		break;
	case MOD_UNLOAD:
		/*
		 * NOTE(review): the transport registered at MOD_LOAD is not
		 * unregistered here -- confirm the p9fs layer has no matching
		 * unregister call or that unload ordering makes this safe.
		 */
		p9_destroy_zones();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}
506 
/* Attach below virtio_pci; depends on the virtio core and the p9fs client. */
DRIVER_MODULE(virtio_p9fs, virtio_pci, vt9p_drv, vt9p_modevent, 0);
MODULE_VERSION(virtio_p9fs, 1);
MODULE_DEPEND(virtio_p9fs, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_p9fs, p9fs, 1, 1, 1);
511