/*-
 * Copyright (c) 2017 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *	notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *	notice, this list of conditions and the following disclaimer in the
 *	documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
/*
 * The VirtIO 9P transport driver.  This file contains all functions related
 * to the virtqueue infrastructure: creating the virtqueue, interacting with
 * the host and handling interrupts.
 */
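
/*
 * Rough life cycle of a channel (a summary of the code below): vt9p_attach()
 * sets up the lock, the condvar, the mount tag and the single request
 * virtqueue, and places the channel on a global list; vt9p_create() hands a
 * channel to a p9fs client by matching the mount tag; vt9p_request() maps a
 * T-message/R-message pair onto the virtqueue and sleeps; vt9p_intr_complete()
 * wakes the sleeper once the host has answered; vt9p_close() releases the
 * channel for reuse.
 */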

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/queue.h>
#include <sys/bus.h>
#include <sys/kthread.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <fs/p9fs/p9_client.h>
#include <fs/p9fs/p9_debug.h>
#include <fs/p9fs/p9_protocol.h>
#include <fs/p9fs/p9_transport.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>
#include <dev/virtio/p9fs/virtio_p9fs.h>

#define VT9P_MTX(_sc) (&(_sc)->vt9p_mtx)
#define VT9P_LOCK(_sc) mtx_lock(VT9P_MTX(_sc))
#define VT9P_UNLOCK(_sc) mtx_unlock(VT9P_MTX(_sc))
#define VT9P_LOCK_INIT(_sc) mtx_init(VT9P_MTX(_sc), \
    "VIRTIO 9P CHAN lock", NULL, MTX_DEF)
#define VT9P_LOCK_DESTROY(_sc) mtx_destroy(VT9P_MTX(_sc))
#define MAX_SUPPORTED_SGS 20
static MALLOC_DEFINE(M_P9FS_MNTTAG, "p9fs_mount_tag", "P9fs Mounttag");

struct vt9p_softc {
	device_t vt9p_dev;		/* Underlying virtio device */
	struct mtx vt9p_mtx;		/* Protects the sglist and virtqueue */
	struct sglist *vt9p_sglist;	/* Scratch sglist used to map requests */
	struct cv submit_cv;		/* Waiters for free ring descriptors */
	bool busy;			/* Channel is attached to a client */
	struct virtqueue *vt9p_vq;	/* The single request virtqueue */
	int max_nsegs;			/* Maximum sg segments per request */
	uint16_t mount_tag_len;		/* Length of mount_tag, incl. NUL */
	char *mount_tag;		/* Mount tag advertised by the host */
	STAILQ_ENTRY(vt9p_softc) chan_next;
};

/* Global channel list.  Each channel corresponds to one mount point. */
static STAILQ_HEAD(, vt9p_softc) global_chan_list =
    STAILQ_HEAD_INITIALIZER(global_chan_list);
struct mtx global_chan_list_mtx;
MTX_SYSINIT(global_chan_list_mtx, &global_chan_list_mtx, "9pglobal", MTX_DEF);

static struct virtio_feature_desc virtio_9p_feature_desc[] = {
	{ VIRTIO_9PNET_F_MOUNT_TAG,	"9PMountTag" },
	{ 0, NULL }
};

/* We don't currently allow canceling of virtio requests. */
static int
vt9p_cancel(void *handle, struct p9_req_t *req)
{
	return (1);
}

SYSCTL_NODE(_vfs, OID_AUTO, 9p, CTLFLAG_RW, 0, "9P File System Protocol");

/*
 * Maximum number of seconds the vt9p_request() thread sleeps waiting for an
 * ack from the host before giving up.
 */
static unsigned int vt9p_ackmaxidle = 120;
SYSCTL_UINT(_vfs_9p, OID_AUTO, ackmaxidle, CTLFLAG_RW, &vt9p_ackmaxidle, 0,
    "Maximum time request thread waits for ack from host");

/*
 * Wait for completion of a p9 request.
 *
 * This routine sleeps and releases the channel mutex while waiting; the
 * mutex is reacquired before returning.
 */
static int
vt9p_req_wait(struct vt9p_softc *chan, struct p9_req_t *req)
{
	KASSERT(req->tc->tag != req->rc->tag,
	    ("%s: request %p already completed", __func__, req));

	if (msleep(req, VT9P_MTX(chan), 0, "chan lock", vt9p_ackmaxidle * hz)) {
		/*
		 * No response from the host within vt9p_ackmaxidle seconds;
		 * give up instead of waiting forever.
		 */
		P9_DEBUG(ERROR, "Timeout after waiting %u seconds "
		    "for an ack from host\n", vt9p_ackmaxidle);
		return (EIO);
	}
	KASSERT(req->tc->tag == req->rc->tag,
	    ("%s: spurious event on request %p", __func__, req));
	return (0);
}
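
/*
 * Completion is tracked through the message tags: a request is handed to us
 * with rc->tag differing from tc->tag, and vt9p_intr_complete() copies
 * tc->tag into rc->tag once the host has answered.  The KASSERTs in
 * vt9p_req_wait() rely on this convention to catch spurious wakeups.
 */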

/*
 * Request handler, called for every request submitted to the host.  It maps
 * the tc/rc buffers to a scatter/gather list and enqueues the request on the
 * virtqueue.  The transport is synchronous, so the submitting thread sleeps
 * until the completion interrupt wakes it up; it then returns to the p9fs
 * layer, which processes the rc buffer and completes the request to its
 * upper layers.
 */
static int
vt9p_request(void *handle, struct p9_req_t *req)
{
	int error;
	struct vt9p_softc *chan;
	int readable, writable;
	struct sglist *sg;
	struct virtqueue *vq;

	chan = handle;
	sg = chan->vt9p_sglist;
	vq = chan->vt9p_vq;

	P9_DEBUG(TRANS, "%s: req=%p\n", __func__, req);

	/* Grab the channel lock. */
	VT9P_LOCK(chan);
req_retry:
	sglist_reset(sg);
	/* Out buffers (device-readable): the T-message. */
	error = sglist_append(sg, req->tc->sdata, req->tc->size);
	if (error != 0) {
		P9_DEBUG(ERROR, "%s: sglist append failed\n", __func__);
		VT9P_UNLOCK(chan);
		return (error);
	}
	readable = sg->sg_nseg;

	/* In buffers (device-writable): room for the R-message. */
	error = sglist_append(sg, req->rc->sdata, req->rc->capacity);
	if (error != 0) {
		P9_DEBUG(ERROR, "%s: sglist append failed\n", __func__);
		VT9P_UNLOCK(chan);
		return (error);
	}
	writable = sg->sg_nseg - readable;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error != 0) {
		if (error == ENOSPC) {
			/*
			 * The virtqueue is full.  Wait on the submit condvar
			 * until the completion handler signals that
			 * descriptors have been freed; cv_wait() drops the
			 * channel lock while sleeping and reacquires it.
			 */
			cv_wait(&chan->submit_cv, VT9P_MTX(chan));
			P9_DEBUG(TRANS, "%s: retry virtio request\n", __func__);
			goto req_retry;
		} else {
			P9_DEBUG(ERROR, "%s: virtio enqueue failed\n", __func__);
			VT9P_UNLOCK(chan);
			return (EIO);
		}
	}

	/* Notify the host so it starts processing the request. */
	virtqueue_notify(vq);

	error = vt9p_req_wait(chan, req);
	if (error != 0) {
		VT9P_UNLOCK(chan);
		return (error);
	}

	VT9P_UNLOCK(chan);

	P9_DEBUG(TRANS, "%s: virtio request completed\n", __func__);

	return (0);
}
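
/*
 * For reference, the buffer layout handed to the host for one request looks
 * roughly like this (illustration only):
 *
 *   segments [0 .. readable - 1]     tc->sdata, tc->size bytes
 *                                    (driver-written T-message, device reads)
 *   segments [readable .. nseg - 1]  rc->sdata, rc->capacity bytes
 *                                    (device writes the R-message here)
 */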

/*
 * Completion of the request from the virtqueue.  This interrupt handler is
 * set up at initialization and is called for every completed request.  It
 * simply wakes up the corresponding sleeping submitter.
 */
static void
vt9p_intr_complete(void *xsc)
{
	struct vt9p_softc *chan;
	struct virtqueue *vq;
	struct p9_req_t *curreq;

	chan = (struct vt9p_softc *)xsc;
	vq = chan->vt9p_vq;

	P9_DEBUG(TRANS, "%s: completing\n", __func__);

	VT9P_LOCK(chan);
again:
	while ((curreq = virtqueue_dequeue(vq, NULL)) != NULL) {
		curreq->rc->tag = curreq->tc->tag;
		wakeup_one(curreq);
	}
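	/*
	 * Re-arm the virtqueue interrupt and re-check: if more completions
	 * arrived while interrupts were disabled, disable them again and
	 * drain the ring so that no wakeup is lost.
	 */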
	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		goto again;
	}
	cv_signal(&chan->submit_cv);
	VT9P_UNLOCK(chan);
}

/*
 * Allocation of the virtqueue with interrupt complete routines.
 */
static int
vt9p_alloc_virtqueue(struct vt9p_softc *sc)
{
	struct vq_alloc_info vq_info;
	device_t dev;

	dev = sc->vt9p_dev;

	VQ_ALLOC_INFO_INIT(&vq_info, sc->max_nsegs,
	    vt9p_intr_complete, sc, &sc->vt9p_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 1, &vq_info));
}
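
/*
 * The VirtIO 9P device exposes a single virtqueue that carries both requests
 * and replies, which is why only one vq_alloc_info entry is set up above.
 */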

/* Probe for existence of 9P virtio channels */
static int
vt9p_probe(device_t dev)
{

	/* If the virtio device type is a 9P device, then we claim and attach it */
	if (virtio_get_device_type(dev) != VIRTIO_ID_9P)
		return (ENXIO);
	device_set_desc(dev, "VirtIO 9P Transport");

	return (BUS_PROBE_DEFAULT);
}

static void
vt9p_stop(struct vt9p_softc *sc)
{

	/* Device-specific stop: quiesce interrupts and stop the device. */
	virtqueue_disable_intr(sc->vt9p_vq);
	virtio_stop(sc->vt9p_dev);
}

/* Detach the 9P virtio PCI device */
static int
vt9p_detach(device_t dev)
{
	struct vt9p_softc *sc;

	sc = device_get_softc(dev);
	VT9P_LOCK(sc);
	vt9p_stop(sc);
	VT9P_UNLOCK(sc);

	if (sc->vt9p_sglist) {
		sglist_free(sc->vt9p_sglist);
		sc->vt9p_sglist = NULL;
	}
	if (sc->mount_tag) {
		free(sc->mount_tag, M_P9FS_MNTTAG);
		sc->mount_tag = NULL;
	}
	mtx_lock(&global_chan_list_mtx);
	STAILQ_REMOVE(&global_chan_list, sc, vt9p_softc, chan_next);
	mtx_unlock(&global_chan_list_mtx);

	VT9P_LOCK_DESTROY(sc);
	cv_destroy(&sc->submit_cv);

	return (0);
}

/* Attach the 9P virtio PCI device */
static int
vt9p_attach(device_t dev)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct vt9p_softc *chan;
	char *mount_tag;
	int error;
	uint16_t mount_tag_len;

	chan = device_get_softc(dev);
	chan->vt9p_dev = dev;

	/* Init the channel lock. */
	VT9P_LOCK_INIT(chan);
	/* Initialize the condition variable. */
	cv_init(&chan->submit_cv, "Condition variable for submit queue");
	chan->max_nsegs = MAX_SUPPORTED_SGS;
	chan->vt9p_sglist = sglist_alloc(chan->max_nsegs, M_WAITOK);

	/* Negotiate the features from the host. */
	virtio_set_feature_desc(dev, virtio_9p_feature_desc);
	virtio_negotiate_features(dev, VIRTIO_9PNET_F_MOUNT_TAG);

	/*
	 * If the mount tag feature is offered by the host, read the tag
	 * length from the device config; otherwise fail the attach.
	 */
	if (virtio_with_feature(dev, VIRTIO_9PNET_F_MOUNT_TAG))
		mount_tag_len = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_9pnet_config, mount_tag_len));
	else {
		error = EINVAL;
		P9_DEBUG(ERROR, "%s: Mount tag feature not supported by host\n", __func__);
		goto out;
	}
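	/*
	 * The device config is expected to hold a 16-bit mount tag length
	 * followed by the tag bytes, which are not NUL terminated.  Allocate
	 * one extra, zeroed byte so the tag can be used as a C string.
	 */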
	mount_tag = malloc(mount_tag_len + 1, M_P9FS_MNTTAG,
	    M_WAITOK | M_ZERO);

	virtio_read_device_config_array(dev,
	    offsetof(struct virtio_9pnet_config, mount_tag),
	    mount_tag, 1, mount_tag_len);

	device_printf(dev, "Mount tag: %s\n", mount_tag);

	/* Account for the NUL terminator. */
	mount_tag_len++;
	chan->mount_tag_len = mount_tag_len;
	chan->mount_tag = mount_tag;

	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "p9fs_mount_tag",
	    CTLFLAG_RD, chan->mount_tag, 0, "Mount tag");

	/* We expect one virtqueue, for requests. */
	error = vt9p_alloc_virtqueue(chan);
	if (error != 0) {
		P9_DEBUG(ERROR, "%s: allocating the virtqueue failed\n", __func__);
		goto out;
	}
	error = virtio_setup_intr(dev, INTR_TYPE_MISC | INTR_MPSAFE);
	if (error != 0) {
		P9_DEBUG(ERROR, "%s: cannot set up the virtqueue interrupt\n", __func__);
		goto out;
	}
	error = virtqueue_enable_intr(chan->vt9p_vq);
	if (error != 0) {
		P9_DEBUG(ERROR, "%s: cannot enable the virtqueue interrupt\n", __func__);
		goto out;
	}

	mtx_lock(&global_chan_list_mtx);
	/* Insert the channel into the global channel list. */
	STAILQ_INSERT_HEAD(&global_chan_list, chan, chan_next);
	mtx_unlock(&global_chan_list_mtx);

	return (0);
out:
	/* Something went wrong; detach the device. */
	vt9p_detach(dev);
	return (error);
}
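
/*
 * Example host-side configuration (an illustration only; the tag and path
 * below are made up): with QEMU, a host directory can be exported to the
 * guest as
 *
 *   -fsdev local,id=fs0,path=/host/share,security_model=none
 *   -device virtio-9p-pci,fsdev=fs0,mount_tag=share0
 *
 * "share0" is then the mount tag that vt9p_attach() reads from the device
 * config and publishes through the p9fs_mount_tag sysctl.
 */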

/*
 * Look up the virtio channel that matches the given mount tag and hand it
 * to the p9fs client.  This sets up the transport channel used for all 9P
 * communication on that mount.
 */
static int
vt9p_create(const char *mount_tag, void **handlep)
{
	struct vt9p_softc *sc, *chan;

	chan = NULL;

	/*
	 * Find the channel for this client in the global list of channels,
	 * based on the mount tag, and attach it to the client.
	 */
	mtx_lock(&global_chan_list_mtx);
	STAILQ_FOREACH(sc, &global_chan_list, chan_next) {
		if (!strcmp(sc->mount_tag, mount_tag)) {
			chan = sc;
			break;
		}
	}
	mtx_unlock(&global_chan_list_mtx);

	/*
	 * If the channel is already attached to a client, it cannot be used
	 * by another one.
	 */
	if (chan && chan->busy) {
		P9_DEBUG(TRANS, "%s: channel busy: mount_tag=%s\n",
		    __func__, mount_tag);
		return (EBUSY);
	}

	/* Attach the channel to this client, or fail if none was found. */
	if (chan) {
		*handlep = (void *)chan;
		chan->busy = true;
	} else {
		P9_DEBUG(TRANS, "%s: no channel with mount_tag=%s\n",
		    __func__, mount_tag);
		return (EINVAL);
	}

	return (0);
}
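
/*
 * The mount tag used for the lookup above comes from the p9fs client at
 * mount time; a channel serves a single mount until vt9p_close() clears its
 * busy flag again.
 */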

static void
vt9p_close(void *handle)
{
	struct vt9p_softc *chan = handle;

	chan->busy = false;
}

static struct p9_trans_module vt9p_trans = {
	.name = "virtio",
	.create = vt9p_create,
	.close = vt9p_close,
	.request = vt9p_request,
	.cancel = vt9p_cancel,
};
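
/*
 * p9_register_trans() (called from vt9p_modevent() below) makes this
 * transport available to the p9fs client code, which is expected to look it
 * up by the "virtio" name given above.
 */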

static device_method_t vt9p_mthds[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,	 vt9p_probe),
	DEVMETHOD(device_attach, vt9p_attach),
	DEVMETHOD(device_detach, vt9p_detach),
	DEVMETHOD_END
};

static driver_t vt9p_drv = {
	"virtio_p9fs",
	vt9p_mthds,
	sizeof(struct vt9p_softc)
};

static int
vt9p_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
		p9_init_zones();
		p9_register_trans(&vt9p_trans);
		break;
	case MOD_UNLOAD:
		p9_destroy_zones();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}

DRIVER_MODULE(virtio_p9fs, virtio_pci, vt9p_drv, vt9p_modevent, 0);
MODULE_VERSION(virtio_p9fs, 1);
MODULE_DEPEND(virtio_p9fs, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_p9fs, p9fs, 1, 1, 1);