/*-
 * Copyright (c) 2017 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *	notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *	notice, this list of conditions and the following disclaimer in the
 *	documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
/*
 * The Virtio 9P transport driver. This file contains all functions related
 * to the virtqueue infrastructure: creating the virtqueue, host
 * interactions, interrupts, etc.
 */

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/queue.h>
#include <sys/bus.h>
#include <sys/kthread.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <fs/p9fs/p9_client.h>
#include <fs/p9fs/p9_debug.h>
#include <fs/p9fs/p9_protocol.h>
#include <fs/p9fs/p9_transport.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>
#include <dev/virtio/p9fs/virtio_p9fs.h>

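/*
 * Per-channel lock.  It serializes access to the channel's scatter/gather
 * list and request virtqueue between the submission path and the completion
 * interrupt handler.
 */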
#define VT9P_MTX(_sc) (&(_sc)->vt9p_mtx)
#define VT9P_LOCK(_sc) mtx_lock(VT9P_MTX(_sc))
#define VT9P_UNLOCK(_sc) mtx_unlock(VT9P_MTX(_sc))
#define VT9P_LOCK_INIT(_sc) mtx_init(VT9P_MTX(_sc), \
    "VIRTIO 9P CHAN lock", NULL, MTX_DEF)
#define VT9P_LOCK_DESTROY(_sc) mtx_destroy(VT9P_MTX(_sc))
#define MAX_SUPPORTED_SGS 20
static MALLOC_DEFINE(M_P9FS_MNTTAG, "p9fs_mount_tag", "P9fs Mounttag");

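/*
 * Per-device softc describing one virtio 9P transport channel.  Each channel
 * carries a single request virtqueue, a scatter/gather list reused for every
 * request, the mount tag advertised by the host, and a condition variable on
 * which submitters wait when the virtqueue is full.
 */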
struct vt9p_softc {
	device_t vt9p_dev;
	struct mtx vt9p_mtx;
	struct sglist *vt9p_sglist;
	struct cv submit_cv;
	bool busy;
	struct virtqueue *vt9p_vq;
	int max_nsegs;
	uint16_t mount_tag_len;
	char *mount_tag;
	STAILQ_ENTRY(vt9p_softc) chan_next;
};

/* Global channel list. Each channel corresponds to a mount point. */
static STAILQ_HEAD(, vt9p_softc) global_chan_list;
struct mtx global_chan_list_mtx;

static struct virtio_feature_desc virtio_9p_feature_desc[] = {
	{ VIRTIO_9PNET_F_MOUNT_TAG,	"9PMountTag" },
	{ 0, NULL }
};

static void
global_chan_list_init(void)
{

	mtx_init(&global_chan_list_mtx, "9pglobal",
	    NULL, MTX_DEF);
	STAILQ_INIT(&global_chan_list);
}
SYSINIT(global_chan_list_init, SI_SUB_KLD, SI_ORDER_FIRST,
    global_chan_list_init, NULL);

/* We don't currently allow canceling of virtio requests */
static int
vt9p_cancel(void *handle, struct p9_req_t *req)
{

	return (1);
}

SYSCTL_NODE(_vfs, OID_AUTO, 9p, CTLFLAG_RW, 0, "9P File System Protocol");

/*
 * Maximum number of seconds the vt9p_request thread sleeps waiting for an
 * ack from the host before giving up.
 */
static unsigned int vt9p_ackmaxidle = 120;

SYSCTL_UINT(_vfs_9p, OID_AUTO, ackmaxidle, CTLFLAG_RW, &vt9p_ackmaxidle, 0,
    "Maximum time request thread waits for ack from host");

/*
 * Wait for completion of a p9 request.
 *
 * This routine sleeps and releases the chan mtx during the wait; the
 * chan mtx is acquired again before returning.
 */
static int
vt9p_req_wait(struct vt9p_softc *chan, struct p9_req_t *req)
{
	if (req->tc->tag != req->rc->tag) {
		if (msleep(req, VT9P_MTX(chan), 0, "chan lock",
		    vt9p_ackmaxidle * hz)) {
			/*
			 * No response from the host within vt9p_ackmaxidle
			 * seconds; we cannot wait forever, so give up.
			 */
			P9_DEBUG(ERROR, "Timeout after waiting %u seconds "
			    "for an ack from host\n", vt9p_ackmaxidle);
			return (EIO);
		}
		KASSERT(req->tc->tag == req->rc->tag,
		    ("Spurious event on p9 req"));
	}
	return (0);
}

/*
 * Request handler. This is called for every request submitted to the host.
 * It maps the tc/rc buffers to sg lists and submits the request to the
 * virtqueue. Since the transport is synchronous, the submitting thread
 * sleeps until the completion interrupt wakes it up. Once it wakes up, it
 * returns to the P9fs layer, where the rc buffer is processed and
 * completed to its upper layers.
 */
static int
vt9p_request(void *handle, struct p9_req_t *req)
{
	int error;
	struct vt9p_softc *chan;
	int readable, writable;
	struct sglist *sg;
	struct virtqueue *vq;

	chan = handle;
	sg = chan->vt9p_sglist;
	vq = chan->vt9p_vq;

	P9_DEBUG(TRANS, "%s: req=%p\n", __func__, req);

	/* Grab the channel lock. */
	VT9P_LOCK(chan);
	sglist_reset(sg);
	/* Out buffers: the request (tc) data readable by the host. */
	error = sglist_append(sg, req->tc->sdata, req->tc->size);
	if (error != 0) {
		P9_DEBUG(ERROR, "%s: sglist append failed\n", __func__);
		VT9P_UNLOCK(chan);
		return (error);
	}
	readable = sg->sg_nseg;

	/* In buffers: the response (rc) buffer the host writes into. */
	error = sglist_append(sg, req->rc->sdata, req->rc->capacity);
	if (error != 0) {
		P9_DEBUG(ERROR, "%s: sglist append failed\n", __func__);
		VT9P_UNLOCK(chan);
		return (error);
	}
	writable = sg->sg_nseg - readable;

req_retry:
	error = virtqueue_enqueue(vq, req, sg, readable, writable);

	if (error != 0) {
		if (error == ENOSPC) {
			/*
			 * The virtqueue is full; wait on the submit condvar.
			 * cv_wait() drops the channel lock while sleeping and
			 * reacquires it before returning.
			 */
			cv_wait(&chan->submit_cv, VT9P_MTX(chan));
			P9_DEBUG(TRANS, "%s: retry virtio request\n", __func__);
			goto req_retry;
		} else {
			P9_DEBUG(ERROR, "%s: virtio enqueue failed\n", __func__);
			VT9P_UNLOCK(chan);
			return (EIO);
		}
	}

	/* We have to notify */
	virtqueue_notify(vq);

	error = vt9p_req_wait(chan, req);
	if (error != 0) {
		VT9P_UNLOCK(chan);
		return (error);
	}

	VT9P_UNLOCK(chan);

	P9_DEBUG(TRANS, "%s: virtio request kicked\n", __func__);

	return (0);
}

/*
 * Completion of the request from the virtqueue. This interrupt handler is
 * set up at initialization and is called for every completing request. It
 * just wakes up the sleeping submission requests.
 */
static void
vt9p_intr_complete(void *xsc)
{
	struct vt9p_softc *chan;
	struct virtqueue *vq;
	struct p9_req_t *curreq;

	chan = (struct vt9p_softc *)xsc;
	vq = chan->vt9p_vq;

	P9_DEBUG(TRANS, "%s: completing\n", __func__);

	VT9P_LOCK(chan);
	while ((curreq = virtqueue_dequeue(vq, NULL)) != NULL) {
		curreq->rc->tag = curreq->tc->tag;
		wakeup_one(curreq);
	}
	virtqueue_enable_intr(vq);
	cv_signal(&chan->submit_cv);
	VT9P_UNLOCK(chan);
}

/*
 * Allocation of the virtqueue with interrupt complete routines.
 */
static int
vt9p_alloc_virtqueue(struct vt9p_softc *sc)
{
	struct vq_alloc_info vq_info;
	device_t dev;

	dev = sc->vt9p_dev;

	VQ_ALLOC_INFO_INIT(&vq_info, sc->max_nsegs,
	    vt9p_intr_complete, sc, &sc->vt9p_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 1, &vq_info));
}

/* Probe for existence of 9P virtio channels */
static int
vt9p_probe(device_t dev)
{

	/* If the virtio device type is a 9P device, then we claim and attach it */
	if (virtio_get_device_type(dev) != VIRTIO_ID_9P)
		return (ENXIO);
	device_set_desc(dev, "VirtIO 9P Transport");

	return (BUS_PROBE_DEFAULT);
}

static void
vt9p_stop(struct vt9p_softc *sc)
{

	/* Device-specific stop: disable the virtqueue interrupt and stop the device. */
	virtqueue_disable_intr(sc->vt9p_vq);
	virtio_stop(sc->vt9p_dev);
}

/* Detach the 9P virtio PCI device */
static int
vt9p_detach(device_t dev)
{
	struct vt9p_softc *sc;

	sc = device_get_softc(dev);
	VT9P_LOCK(sc);
	vt9p_stop(sc);
	VT9P_UNLOCK(sc);

	if (sc->vt9p_sglist) {
		sglist_free(sc->vt9p_sglist);
		sc->vt9p_sglist = NULL;
	}
	if (sc->mount_tag) {
		free(sc->mount_tag, M_P9FS_MNTTAG);
		sc->mount_tag = NULL;
	}
	mtx_lock(&global_chan_list_mtx);
	STAILQ_REMOVE(&global_chan_list, sc, vt9p_softc, chan_next);
	mtx_unlock(&global_chan_list_mtx);

	VT9P_LOCK_DESTROY(sc);
	cv_destroy(&sc->submit_cv);

	return (0);
}

/* Attach the 9P virtio PCI device */
static int
vt9p_attach(device_t dev)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct vt9p_softc *chan;
	char *mount_tag;
	int error;
	uint16_t mount_tag_len;

	chan = device_get_softc(dev);
	chan->vt9p_dev = dev;

	/* Init the channel lock. */
	VT9P_LOCK_INIT(chan);
	/* Initialize the condition variable. */
	cv_init(&chan->submit_cv, "Condition variable for submit queue");
	chan->max_nsegs = MAX_SUPPORTED_SGS;
	chan->vt9p_sglist = sglist_alloc(chan->max_nsegs, M_WAITOK);

	/* Negotiate the features from the host. */
	virtio_set_feature_desc(dev, virtio_9p_feature_desc);
	virtio_negotiate_features(dev, VIRTIO_9PNET_F_MOUNT_TAG);

	/*
	 * If the mount tag feature is supported, read the mount tag
	 * from the device config.
	 */
	if (virtio_with_feature(dev, VIRTIO_9PNET_F_MOUNT_TAG)) {
		mount_tag_len = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_9pnet_config, mount_tag_len));
	} else {
		error = EINVAL;
		P9_DEBUG(ERROR, "%s: Mount tag feature not supported by host\n", __func__);
		goto out;
	}
	mount_tag = malloc(mount_tag_len + 1, M_P9FS_MNTTAG,
	    M_WAITOK | M_ZERO);

	virtio_read_device_config_array(dev,
	    offsetof(struct virtio_9pnet_config, mount_tag),
	    mount_tag, 1, mount_tag_len);

	device_printf(dev, "Mount tag: %s\n", mount_tag);

	/* Account for the trailing NUL added by the zeroed allocation above. */
	mount_tag_len++;
	chan->mount_tag_len = mount_tag_len;
	chan->mount_tag = mount_tag;

	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "p9fs_mount_tag",
	    CTLFLAG_RD, chan->mount_tag, 0, "Mount tag");

	/* We expect one virtqueue, for requests. */
	error = vt9p_alloc_virtqueue(chan);
	if (error != 0) {
		P9_DEBUG(ERROR, "%s: Allocating the virtqueue failed\n", __func__);
		goto out;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_MISC | INTR_MPSAFE);
	if (error != 0) {
		P9_DEBUG(ERROR, "%s: Cannot setup virtqueue interrupt\n", __func__);
		goto out;
	}

	error = virtqueue_enable_intr(chan->vt9p_vq);
	if (error != 0) {
		P9_DEBUG(ERROR, "%s: Cannot enable virtqueue interrupt\n", __func__);
		goto out;
	}

	mtx_lock(&global_chan_list_mtx);
	/* Insert the channel into the global channel list. */
	STAILQ_INSERT_HEAD(&global_chan_list, chan, chan_next);
	mtx_unlock(&global_chan_list_mtx);

	return (0);
out:
	/* Something went wrong, detach the device. */
	vt9p_detach(dev);
	return (error);
}

/*
 * Create a transport channel for 9P communication by looking up the virtio
 * channel that matches the given mount tag and attaching it to the client.
 */
static int
vt9p_create(const char *mount_tag, void **handlep)
{
	struct vt9p_softc *sc, *chan;

	chan = NULL;

	/*
	 * Find the corresponding channel for this client in the global list
	 * of channels, based on the mount tag, and attach it to the client.
	 */
	mtx_lock(&global_chan_list_mtx);
	STAILQ_FOREACH(sc, &global_chan_list, chan_next) {
		if (!strcmp(sc->mount_tag, mount_tag)) {
			chan = sc;
			break;
		}
	}
	mtx_unlock(&global_chan_list_mtx);

	/*
	 * If chan is already attached to a client then it cannot be used for
	 * another client.
	 */
	if (chan && chan->busy) {
		//p9_debug(TRANS, "Channel busy: used by clnt=%p\n", chan->client);
		return (EBUSY);
	}

	/* If we don't have a matching channel, bail out for now. */
	if (chan) {
		*handlep = (void *)chan;
		chan->busy = TRUE;
	} else {
		P9_DEBUG(TRANS, "%s: No Global channel with mount_tag=%s\n",
		    __func__, mount_tag);
		return (EINVAL);
	}

	return (0);
}

static void
vt9p_close(void *handle)
{
	struct vt9p_softc *chan = handle;

	chan->busy = FALSE;
}

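/*
 * 9P transport operations provided by this driver; registered with the
 * p9fs client layer via p9_register_trans() at module load time.
 */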
static struct p9_trans_module vt9p_trans = {
	.name = "virtio",
	.create = vt9p_create,
	.close = vt9p_close,
	.request = vt9p_request,
	.cancel = vt9p_cancel,
};

static device_method_t vt9p_mthds[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,	 vt9p_probe),
	DEVMETHOD(device_attach, vt9p_attach),
	DEVMETHOD(device_detach, vt9p_detach),
	DEVMETHOD_END
};

static driver_t vt9p_drv = {
	"virtio_p9fs",
	vt9p_mthds,
	sizeof(struct vt9p_softc)
};

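/*
 * Module event handler: on load, set up the p9fs memory zones and register
 * this virtio transport with the 9P client code; on unload, tear the zones
 * down again.
 */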
static int
vt9p_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
		p9_init_zones();
		p9_register_trans(&vt9p_trans);
		break;
	case MOD_UNLOAD:
		p9_destroy_zones();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}

DRIVER_MODULE(virtio_p9fs, virtio_pci, vt9p_drv, vt9p_modevent, 0);
MODULE_VERSION(virtio_p9fs, 1);
MODULE_DEPEND(virtio_p9fs, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_p9fs, p9fs, 1, 1, 1);