xref: /freebsd/sys/dev/virtio/scsi/virtio_scsi.c (revision 67ca7330cf34a789afbbff9ae7e4cdc4a4917ae3)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /* Driver for VirtIO SCSI devices. */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/kthread.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/sglist.h>
41 #include <sys/sysctl.h>
42 #include <sys/lock.h>
43 #include <sys/mutex.h>
44 #include <sys/callout.h>
45 #include <sys/queue.h>
46 #include <sys/sbuf.h>
47 
48 #include <machine/stdarg.h>
49 
50 #include <machine/bus.h>
51 #include <machine/resource.h>
52 #include <sys/bus.h>
53 #include <sys/rman.h>
54 
55 #include <cam/cam.h>
56 #include <cam/cam_ccb.h>
57 #include <cam/cam_sim.h>
58 #include <cam/cam_periph.h>
59 #include <cam/cam_xpt_sim.h>
60 #include <cam/cam_debug.h>
61 #include <cam/scsi/scsi_all.h>
62 #include <cam/scsi/scsi_message.h>
63 
64 #include <dev/virtio/virtio.h>
65 #include <dev/virtio/virtqueue.h>
66 #include <dev/virtio/scsi/virtio_scsi.h>
67 #include <dev/virtio/scsi/virtio_scsivar.h>
68 
69 #include "virtio_if.h"
70 
71 static int	vtscsi_modevent(module_t, int, void *);
72 
73 static int	vtscsi_probe(device_t);
74 static int	vtscsi_attach(device_t);
75 static int	vtscsi_detach(device_t);
76 static int	vtscsi_suspend(device_t);
77 static int	vtscsi_resume(device_t);
78 
79 static void	vtscsi_negotiate_features(struct vtscsi_softc *);
80 static void	vtscsi_read_config(struct vtscsi_softc *,
81 		    struct virtio_scsi_config *);
82 static int	vtscsi_maximum_segments(struct vtscsi_softc *, int);
83 static int	vtscsi_alloc_virtqueues(struct vtscsi_softc *);
84 static void	vtscsi_write_device_config(struct vtscsi_softc *);
85 static int	vtscsi_reinit(struct vtscsi_softc *);
86 
87 static int	vtscsi_alloc_cam(struct vtscsi_softc *);
88 static int	vtscsi_register_cam(struct vtscsi_softc *);
89 static void	vtscsi_free_cam(struct vtscsi_softc *);
90 static void	vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
91 static int	vtscsi_register_async(struct vtscsi_softc *);
92 static void	vtscsi_deregister_async(struct vtscsi_softc *);
93 static void	vtscsi_cam_action(struct cam_sim *, union ccb *);
94 static void	vtscsi_cam_poll(struct cam_sim *);
95 
96 static void	vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
97 		    union ccb *);
98 static void	vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
99 		    union ccb *);
100 static void	vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
101 static void	vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
102 static void	vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
103 static void	vtscsi_cam_path_inquiry(struct vtscsi_softc *,
104 		    struct cam_sim *, union ccb *);
105 
106 static int	vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
107 		    struct sglist *, struct ccb_scsiio *);
108 static int	vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
109 		    struct vtscsi_request *, int *, int *);
110 static int	vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
111 		    struct vtscsi_request *);
112 static int	vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
113 static void	vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
114 		    struct vtscsi_request *);
115 static int	vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
116 		    struct vtscsi_request *);
117 static void	vtscsi_timedout_scsi_cmd(void *);
118 static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
119 static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
120 		    struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
121 static void	vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
122 		    struct vtscsi_request *);
123 
124 static void	vtscsi_poll_ctrl_req(struct vtscsi_softc *,
125 		    struct vtscsi_request *);
126 static int	vtscsi_execute_ctrl_req(struct vtscsi_softc *,
127 		    struct vtscsi_request *, struct sglist *, int, int, int);
128 static void	vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c,
129 		    struct vtscsi_request *);
130 static int	vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
131 		    struct vtscsi_request *);
132 static int	vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
133 		    struct vtscsi_request *);
134 
135 static void	vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
136 static void	vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
137 static void	vtscsi_init_scsi_cmd_req(struct ccb_scsiio *,
138 		    struct virtio_scsi_cmd_req *);
139 static void	vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t,
140 		    uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
141 
142 static void	vtscsi_freeze_simq(struct vtscsi_softc *, int);
143 static int	vtscsi_thaw_simq(struct vtscsi_softc *, int);
144 
145 static void	vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
146 		    lun_id_t);
147 static void	vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
148 		    lun_id_t);
149 static void	vtscsi_execute_rescan_bus(struct vtscsi_softc *);
150 
151 static void	vtscsi_handle_event(struct vtscsi_softc *,
152 		    struct virtio_scsi_event *);
153 static int	vtscsi_enqueue_event_buf(struct vtscsi_softc *,
154 		    struct virtio_scsi_event *);
155 static int	vtscsi_init_event_vq(struct vtscsi_softc *);
156 static void	vtscsi_reinit_event_vq(struct vtscsi_softc *);
157 static void	vtscsi_drain_event_vq(struct vtscsi_softc *);
158 
159 static void	vtscsi_complete_vqs_locked(struct vtscsi_softc *);
160 static void	vtscsi_complete_vqs(struct vtscsi_softc *);
161 static void	vtscsi_drain_vqs(struct vtscsi_softc *);
162 static void	vtscsi_cancel_request(struct vtscsi_softc *,
163 		    struct vtscsi_request *);
164 static void	vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
165 static void	vtscsi_stop(struct vtscsi_softc *);
166 static int	vtscsi_reset_bus(struct vtscsi_softc *);
167 
168 static void	vtscsi_init_request(struct vtscsi_softc *,
169 		    struct vtscsi_request *);
170 static int	vtscsi_alloc_requests(struct vtscsi_softc *);
171 static void	vtscsi_free_requests(struct vtscsi_softc *);
172 static void	vtscsi_enqueue_request(struct vtscsi_softc *,
173 		    struct vtscsi_request *);
174 static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);
175 
176 static void	vtscsi_complete_request(struct vtscsi_request *);
177 static void	vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);
178 
179 static void	vtscsi_control_vq_intr(void *);
180 static void	vtscsi_event_vq_intr(void *);
181 static void	vtscsi_request_vq_intr(void *);
182 static void	vtscsi_disable_vqs_intr(struct vtscsi_softc *);
183 static void	vtscsi_enable_vqs_intr(struct vtscsi_softc *);
184 
185 static void	vtscsi_get_tunables(struct vtscsi_softc *);
186 static void	vtscsi_add_sysctl(struct vtscsi_softc *);
187 
188 static void	vtscsi_printf_req(struct vtscsi_request *, const char *,
189 		    const char *, ...);
190 
191 /* Global tunables. */
192 /*
193  * The current QEMU VirtIO SCSI implementation does not cancel in-flight
194  * IO during virtio_stop(). So in-flight requests still complete after the
195  * device reset. We would have to wait for all the in-flight IO to complete,
196  * which defeats the typical purpose of a bus reset. We could simulate the
197  * bus reset with either I_T_NEXUS_RESET of all the targets, or with
198  * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
199  * control virtqueue). But this isn't very useful if things really go off
200  * the rails, so default to disabled for now.
201  */
/* Disabled by default; see the rationale in the comment block above. */
static int vtscsi_bus_reset_disable = 1;
TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);

/* Human-readable names for the VirtIO SCSI feature bits we may negotiate. */
static struct virtio_feature_desc vtscsi_feature_desc[] = {
	{ VIRTIO_SCSI_F_INOUT,		"InOut"		},
	{ VIRTIO_SCSI_F_HOTPLUG,	"Hotplug"	},

	{ 0, NULL }
};
211 
/* Newbus entry points; only the standard device methods are implemented. */
static device_method_t vtscsi_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtscsi_probe),
	DEVMETHOD(device_attach,	vtscsi_attach),
	DEVMETHOD(device_detach,	vtscsi_detach),
	DEVMETHOD(device_suspend,	vtscsi_suspend),
	DEVMETHOD(device_resume,	vtscsi_resume),

	DEVMETHOD_END
};

static driver_t vtscsi_driver = {
	"vtscsi",
	vtscsi_methods,
	sizeof(struct vtscsi_softc)
};
static devclass_t vtscsi_devclass;

/* Attach below virtio_pci; depends on the virtio core and CAM. */
DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass,
    vtscsi_modevent, 0);
MODULE_VERSION(virtio_scsi, 1);
MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);

/* PNP matching on the VirtIO SCSI device ID. */
VIRTIO_SIMPLE_PNPTABLE(virtio_scsi, VIRTIO_ID_SCSI, "VirtIO SCSI Adapter");
VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_scsi);
238 
239 static int
240 vtscsi_modevent(module_t mod, int type, void *unused)
241 {
242 	int error;
243 
244 	switch (type) {
245 	case MOD_LOAD:
246 	case MOD_QUIESCE:
247 	case MOD_UNLOAD:
248 	case MOD_SHUTDOWN:
249 		error = 0;
250 		break;
251 	default:
252 		error = EOPNOTSUPP;
253 		break;
254 	}
255 
256 	return (error);
257 }
258 
/* Probe: match any VirtIO device advertising the SCSI device ID. */
static int
vtscsi_probe(device_t dev)
{
	return (VIRTIO_SIMPLE_PROBE(dev, virtio_scsi));
}
264 
/*
 * Attach: negotiate features, read the device configuration, allocate
 * the sglist, virtqueues, request pool and CAM structures, then enable
 * interrupts and register with CAM. Every failure funnels through the
 * common "fail" path, which reuses vtscsi_detach() to unwind whatever
 * was set up so far.
 */
static int
vtscsi_attach(device_t dev)
{
	struct vtscsi_softc *sc;
	struct virtio_scsi_config scsicfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtscsi_dev = dev;

	VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
	TAILQ_INIT(&sc->vtscsi_req_free);

	vtscsi_get_tunables(sc);
	vtscsi_add_sysctl(sc);

	virtio_set_feature_desc(dev, vtscsi_feature_desc);
	vtscsi_negotiate_features(sc);

	/* Cache the negotiated optional features as softc flags. */
	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;

	vtscsi_read_config(sc, &scsicfg);

	/* Capture the device-reported addressing limits. */
	sc->vtscsi_max_channel = scsicfg.max_channel;
	sc->vtscsi_max_target = scsicfg.max_target;
	sc->vtscsi_max_lun = scsicfg.max_lun;
	sc->vtscsi_event_buf_size = scsicfg.event_info_size;

	vtscsi_write_device_config(sc);

	sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
	sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
	if (sc->vtscsi_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtscsi_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = vtscsi_init_event_vq(sc);
	if (error) {
		device_printf(dev, "cannot populate the eventvq\n");
		goto fail;
	}

	error = vtscsi_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot allocate requests\n");
		goto fail;
	}

	error = vtscsi_alloc_cam(sc);
	if (error) {
		device_printf(dev, "cannot allocate CAM structures\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_CAM);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		goto fail;
	}

	vtscsi_enable_vqs_intr(sc);

	/*
	 * Register with CAM after interrupts are enabled so we will get
	 * notified of the probe responses.
	 */
	error = vtscsi_register_cam(sc);
	if (error) {
		device_printf(dev, "cannot register with CAM\n");
		goto fail;
	}

fail:
	/* On the success path error is 0, so detach runs only on failure. */
	if (error)
		vtscsi_detach(dev);

	return (error);
}
356 
/*
 * Detach: mark the softc as detaching so new CCBs are rejected, stop
 * the device, drain the virtqueues, then release the CAM structures,
 * request pool, and sglist. Also used by vtscsi_attach() to unwind a
 * partial attach, so each step tolerates missing resources.
 */
static int
vtscsi_detach(device_t dev)
{
	struct vtscsi_softc *sc;

	sc = device_get_softc(dev);

	VTSCSI_LOCK(sc);
	sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
	if (device_is_attached(dev))
		vtscsi_stop(sc);
	VTSCSI_UNLOCK(sc);

	/* Complete and drain outside the lock window above. */
	vtscsi_complete_vqs(sc);
	vtscsi_drain_vqs(sc);

	vtscsi_free_cam(sc);
	vtscsi_free_requests(sc);

	if (sc->vtscsi_sglist != NULL) {
		sglist_free(sc->vtscsi_sglist);
		sc->vtscsi_sglist = NULL;
	}

	VTSCSI_LOCK_DESTROY(sc);

	return (0);
}
385 
/* Suspend: nothing is done here; always reports success. */
static int
vtscsi_suspend(device_t dev)
{

	return (0);
}
392 
/* Resume: nothing is done here; always reports success. */
static int
vtscsi_resume(device_t dev)
{

	return (0);
}
399 
400 static void
401 vtscsi_negotiate_features(struct vtscsi_softc *sc)
402 {
403 	device_t dev;
404 	uint64_t features;
405 
406 	dev = sc->vtscsi_dev;
407 	features = virtio_negotiate_features(dev, VTSCSI_FEATURES);
408 	sc->vtscsi_features = features;
409 }
410 
/*
 * Read one field of struct virtio_scsi_config from the device's
 * configuration space into the matching field of _cfg.
 */
#define VTSCSI_GET_CONFIG(_dev, _field, _cfg)			\
	virtio_read_device_config(_dev,				\
	    offsetof(struct virtio_scsi_config, _field),	\
	    &(_cfg)->_field, sizeof((_cfg)->_field))		\

/* Fetch the entire device configuration, one field at a time. */
static void
vtscsi_read_config(struct vtscsi_softc *sc,
    struct virtio_scsi_config *scsicfg)
{
	device_t dev;

	dev = sc->vtscsi_dev;

	bzero(scsicfg, sizeof(struct virtio_scsi_config));

	VTSCSI_GET_CONFIG(dev, num_queues, scsicfg);
	VTSCSI_GET_CONFIG(dev, seg_max, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_sectors, scsicfg);
	VTSCSI_GET_CONFIG(dev, cmd_per_lun, scsicfg);
	VTSCSI_GET_CONFIG(dev, event_info_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, sense_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, cdb_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_channel, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_target, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_lun, scsicfg);
}

#undef VTSCSI_GET_CONFIG
439 
440 static int
441 vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
442 {
443 	int nsegs;
444 
445 	nsegs = VTSCSI_MIN_SEGMENTS;
446 
447 	if (seg_max > 0) {
448 		nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1);
449 		if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
450 			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
451 	} else
452 		nsegs += 1;
453 
454 	return (nsegs);
455 }
456 
457 static int
458 vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
459 {
460 	device_t dev;
461 	struct vq_alloc_info vq_info[3];
462 	int nvqs;
463 
464 	dev = sc->vtscsi_dev;
465 	nvqs = 3;
466 
467 	VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
468 	    &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));
469 
470 	VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
471 	    &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));
472 
473 	VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
474 	    vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
475 	    "%s request", device_get_nameunit(dev));
476 
477 	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
478 }
479 
/*
 * Program the sense and CDB sizes used by this driver into the
 * device's configuration space.
 */
static void
vtscsi_write_device_config(struct vtscsi_softc *sc)
{

	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, sense_size),
	    VIRTIO_SCSI_SENSE_SIZE);

	/*
	 * This is the size in the virtio_scsi_cmd_req structure. Note
	 * this value (32) is larger than the maximum CAM CDB size (16).
	 */
	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, cdb_size),
	    VIRTIO_SCSI_CDB_SIZE);
}
496 
/*
 * Reinitialize the device after a reset: renegotiate the previously
 * saved features, restore our config-space overrides, repopulate the
 * event virtqueue, and re-enable virtqueue interrupts.
 */
static int
vtscsi_reinit(struct vtscsi_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtscsi_dev;

	error = virtio_reinit(dev, sc->vtscsi_features);
	if (error == 0) {
		vtscsi_write_device_config(sc);
		vtscsi_reinit_event_vq(sc);
		virtio_reinit_complete(dev);

		vtscsi_enable_vqs_intr(sc);
	}

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);

	return (error);
}
518 
519 static int
520 vtscsi_alloc_cam(struct vtscsi_softc *sc)
521 {
522 	device_t dev;
523 	struct cam_devq *devq;
524 	int openings;
525 
526 	dev = sc->vtscsi_dev;
527 	openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;
528 
529 	devq = cam_simq_alloc(openings);
530 	if (devq == NULL) {
531 		device_printf(dev, "cannot allocate SIM queue\n");
532 		return (ENOMEM);
533 	}
534 
535 	sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
536 	    "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
537 	    openings, devq);
538 	if (sc->vtscsi_sim == NULL) {
539 		cam_simq_free(devq);
540 		device_printf(dev, "cannot allocate SIM\n");
541 		return (ENOMEM);
542 	}
543 
544 	return (0);
545 }
546 
/*
 * Register the SIM with the XPT layer, create the wildcard bus path,
 * and subscribe to async events. On any failure the partially
 * registered state (path, bus) is torn down before returning.
 */
static int
vtscsi_register_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	int registered, error;

	dev = sc->vtscsi_dev;
	registered = 0;

	VTSCSI_LOCK(sc);

	if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) {
		error = ENOMEM;
		device_printf(dev, "cannot register XPT bus\n");
		goto fail;
	}

	/* Remember we must deregister the bus on a later failure. */
	registered = 1;

	if (xpt_create_path(&sc->vtscsi_path, NULL,
	    cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		error = ENOMEM;
		device_printf(dev, "cannot create bus path\n");
		goto fail;
	}

	if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
		error = EIO;
		device_printf(dev, "cannot register async callback\n");
		goto fail;
	}

	VTSCSI_UNLOCK(sc);

	return (0);

fail:
	/* Unwind in reverse order of registration. */
	if (sc->vtscsi_path != NULL) {
		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;
	}

	if (registered != 0)
		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));

	VTSCSI_UNLOCK(sc);

	return (error);
}
597 
/*
 * Tear down the CAM state created by vtscsi_alloc_cam() and
 * vtscsi_register_cam(): async callback, bus path, bus registration,
 * and finally the SIM itself. Safe to call on a partial setup.
 */
static void
vtscsi_free_cam(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);

	if (sc->vtscsi_path != NULL) {
		vtscsi_deregister_async(sc);

		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;

		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
	}

	if (sc->vtscsi_sim != NULL) {
		/* The '1' releases the associated devq as well. */
		cam_sim_free(sc->vtscsi_sim, 1);
		sc->vtscsi_sim = NULL;
	}

	VTSCSI_UNLOCK(sc);
}
620 
621 static void
622 vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
623 {
624 	struct cam_sim *sim;
625 	struct vtscsi_softc *sc;
626 
627 	sim = cb_arg;
628 	sc = cam_sim_softc(sim);
629 
630 	vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);
631 
632 	/*
633 	 * TODO Once QEMU supports event reporting, we should
634 	 *      (un)subscribe to events here.
635 	 */
636 	switch (code) {
637 	case AC_FOUND_DEVICE:
638 		break;
639 	case AC_LOST_DEVICE:
640 		break;
641 	}
642 }
643 
/*
 * Subscribe to device arrival/departure async events on the wildcard
 * path. Returns the CAM status of the XPT action (a CAM status code,
 * not an errno); the caller compares it against CAM_REQ_CMP.
 */
static int
vtscsi_register_async(struct vtscsi_softc *sc)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
	csa.callback = vtscsi_cam_async;
	csa.callback_arg = sc->vtscsi_sim;

	xpt_action((union ccb *) &csa);

	return (csa.ccb_h.status);
}
659 
/*
 * Unsubscribe from async events by re-issuing the setasync CCB with an
 * empty event mask.
 */
static void
vtscsi_deregister_async(struct vtscsi_softc *sc)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = vtscsi_cam_async;
	csa.callback_arg = sc->vtscsi_sim;

	xpt_action((union ccb *) &csa);
}
673 
/*
 * Main CAM action dispatch for the SIM. Runs with the softc lock held.
 * CCBs arriving while the driver is detaching are failed immediately;
 * otherwise each supported function code is routed to its handler, and
 * anything unrecognized is completed as CAM_REQ_INVALID.
 */
static void
vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct vtscsi_softc *sc;
	struct ccb_hdr *ccbh;

	sc = cam_sim_softc(sim);
	ccbh = &ccb->ccb_h;

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
		/*
		 * The VTSCSI_MTX is briefly dropped between setting
		 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
		 * drop any CCBs that come in during that window.
		 */
		ccbh->status = CAM_NO_HBA;
		xpt_done(ccb);
		return;
	}

	switch (ccbh->func_code) {
	case XPT_SCSI_IO:
		vtscsi_cam_scsi_io(sc, sim, ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* Transport settings cannot be changed on this HBA. */
		ccbh->status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
		vtscsi_cam_get_tran_settings(sc, ccb);
		break;

	case XPT_RESET_BUS:
		vtscsi_cam_reset_bus(sc, ccb);
		break;

	case XPT_RESET_DEV:
		vtscsi_cam_reset_dev(sc, ccb);
		break;

	case XPT_ABORT:
		vtscsi_cam_abort(sc, ccb);
		break;

	case XPT_CALC_GEOMETRY:
		/* Use CAM's default extended-translation geometry. */
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:
		vtscsi_cam_path_inquiry(sc, sim, ccb);
		break;

	default:
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);

		ccbh->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
740 
/*
 * CAM poll entry point: service all virtqueue completions without
 * relying on interrupts (used e.g. while dumping).
 */
static void
vtscsi_cam_poll(struct cam_sim *sim)
{

	vtscsi_complete_vqs_locked(cam_sim_softc(sim));
}
750 
751 static void
752 vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
753     union ccb *ccb)
754 {
755 	struct ccb_hdr *ccbh;
756 	struct ccb_scsiio *csio;
757 	int error;
758 
759 	ccbh = &ccb->ccb_h;
760 	csio = &ccb->csio;
761 
762 	if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
763 		error = EINVAL;
764 		ccbh->status = CAM_REQ_INVALID;
765 		goto done;
766 	}
767 
768 	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
769 	    (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
770 		error = EINVAL;
771 		ccbh->status = CAM_REQ_INVALID;
772 		goto done;
773 	}
774 
775 	error = vtscsi_start_scsi_cmd(sc, ccb);
776 
777 done:
778 	if (error) {
779 		vtscsi_dprintf(sc, VTSCSI_ERROR,
780 		    "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
781 		xpt_done(ccb);
782 	}
783 }
784 
785 static void
786 vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
787 {
788 	struct ccb_trans_settings *cts;
789 	struct ccb_trans_settings_scsi *scsi;
790 
791 	cts = &ccb->cts;
792 	scsi = &cts->proto_specific.scsi;
793 
794 	cts->protocol = PROTO_SCSI;
795 	cts->protocol_version = SCSI_REV_SPC3;
796 	cts->transport = XPORT_SAS;
797 	cts->transport_version = 0;
798 
799 	scsi->valid = CTS_SCSI_VALID_TQ;
800 	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
801 
802 	ccb->ccb_h.status = CAM_REQ_CMP;
803 	xpt_done(ccb);
804 }
805 
806 static void
807 vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
808 {
809 	int error;
810 
811 	error = vtscsi_reset_bus(sc);
812 	if (error == 0)
813 		ccb->ccb_h.status = CAM_REQ_CMP;
814 	else
815 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
816 
817 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
818 	    error, ccb, ccb->ccb_h.status);
819 
820 	xpt_done(ccb);
821 }
822 
/*
 * Handle XPT_RESET_DEV: grab a request from the free pool and issue a
 * device reset via vtscsi_execute_reset_dev_cmd(). On success the CCB
 * is completed asynchronously by the control queue handler; on failure
 * the request is returned to the pool and the CCB completed here.
 */
static void
vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct vtscsi_request *req;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		/* Out of requests; freeze the simq until one is freed. */
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_reset_dev_cmd(sc, req);
	if (error == 0)
		return;

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
858 
859 static void
860 vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
861 {
862 	struct vtscsi_request *req;
863 	struct ccb_hdr *ccbh;
864 	int error;
865 
866 	ccbh = &ccb->ccb_h;
867 
868 	req = vtscsi_dequeue_request(sc);
869 	if (req == NULL) {
870 		error = EAGAIN;
871 		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
872 		goto fail;
873 	}
874 
875 	req->vsr_ccb = ccb;
876 
877 	error = vtscsi_execute_abort_task_cmd(sc, req);
878 	if (error == 0)
879 		return;
880 
881 	vtscsi_enqueue_request(sc, req);
882 
883 fail:
884 	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
885 	    error, req, ccb);
886 
887 	if (error == EAGAIN)
888 		ccbh->status = CAM_RESRC_UNAVAIL;
889 	else
890 		ccbh->status = CAM_REQ_CMP_ERR;
891 
892 	xpt_done(ccb);
893 }
894 
/*
 * Handle XPT_PATH_INQ: describe the HBA's capabilities and limits to
 * CAM, including addressing limits read from the device config and the
 * maximum I/O size derived from the per-request segment budget.
 */
static void
vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	device_t dev;
	struct ccb_pathinq *cpi;

	dev = sc->vtscsi_dev;
	cpi = &ccb->cpi;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);

	cpi->version_num = 1;
	cpi->hba_inquiry = PI_TAG_ABLE;
	cpi->target_sprt = 0;
	cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
	/* Optionally hide bus-reset support; see vtscsi_bus_reset_disable. */
	if (vtscsi_bus_reset_disable != 0)
		cpi->hba_misc |= PIM_NOBUSRESET;
	cpi->hba_eng_cnt = 0;

	cpi->max_target = sc->vtscsi_max_target;
	cpi->max_lun = sc->vtscsi_max_lun;
	cpi->initiator_id = VTSCSI_INITIATOR_ID;

	strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
	strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);

	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);

	cpi->base_transfer_speed = 300000;

	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC3;
	cpi->transport = XPORT_SAS;
	cpi->transport_version = 0;

	/*
	 * Data segments available per request after reserving the
	 * header/response segments (assumes one page per segment).
	 */
	cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;

	cpi->hba_vendor = virtio_get_vendor(dev);
	cpi->hba_device = virtio_get_device(dev);
	cpi->hba_subvendor = virtio_get_subvendor(dev);
	cpi->hba_subdevice = virtio_get_subdevice(dev);

	ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
}
944 
/*
 * Append the CCB's data buffer(s) to the sglist, interpreting
 * data_ptr according to the CAM_DATA_* addressing mode in the CCB
 * flags. Returns 0 or an errno from the sglist routines.
 */
static int
vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
    struct ccb_scsiio *csio)
{
	struct ccb_hdr *ccbh;
	struct bus_dma_segment *dseg;
	int i, error;

	ccbh = &csio->ccb_h;
	error = 0;

	switch ((ccbh->flags & CAM_DATA_MASK)) {
	case CAM_DATA_VADDR:
		/* Single virtually-addressed buffer. */
		error = sglist_append(sg, csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_PADDR:
		/* Single physically-addressed buffer. */
		error = sglist_append_phys(sg,
		    (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_SG:
		/* Array of virtually-addressed segments. */
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append(sg,
			    (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	case CAM_DATA_SG_PADDR:
		/* Array of physically-addressed segments. */
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append_phys(sg,
			    (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	case CAM_DATA_BIO:
		/* Unmapped bio (supported because we set PIM_UNMAPPED). */
		error = sglist_append_bio(sg, (struct bio *) csio->data_ptr);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
988 
/*
 * Build the sglist for a SCSI command in virtqueue order: the request
 * header plus any data-out buffers (device-readable), then the
 * response structure plus any data-in buffers (device-writable).
 * On success *readable/*writable hold the respective segment counts.
 * Returns 0 or EFBIG if the buffers exceed the segment budget.
 */
static int
vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
    int *readable, int *writable)
{
	struct sglist *sg;
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int error;

	sg = sc->vtscsi_sglist;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	sglist_reset(sg);

	sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		/* At least one segment must be left for the response. */
		if (error || sg->sg_nseg == sg->sg_maxseg)
			goto fail;
	}

	*readable = sg->sg_nseg;

	sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		if (error)
			goto fail;
	}

	*writable = sg->sg_nseg - *readable;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
	    "writable=%d\n", req, ccbh, *readable, *writable);

	return (0);

fail:
	/*
	 * This should never happen unless maxio was incorrectly set.
	 */
	vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);

	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
	    "nseg=%d maxseg=%d\n",
	    error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);

	return (EFBIG);
}
1044 
/*
 * Submit a SCSI command on the request virtqueue: fill in the virtio
 * request from the CCB, build the sglist, enqueue, notify the device,
 * and arm the timeout callout if the CCB has a finite timeout. On an
 * enqueue failure (full ring) the simq is frozen and the CCB is marked
 * for requeue. Returns 0 or an errno; on 0 the CCB completes later via
 * vtscsi_complete_scsi_cmd().
 */
static int
vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct sglist *sg;
	struct virtqueue *vq;
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int readable, writable, error;

	sg = sc->vtscsi_sglist;
	vq = sc->vtscsi_request_vq;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	vtscsi_init_scsi_cmd_req(csio, cmd_req);

	error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
	if (error)
		return (error);

	req->vsr_complete = vtscsi_complete_scsi_cmd;
	/* Sentinel value; presumably overwritten by the device — confirm. */
	cmd_resp->response = -1;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);

		ccbh->status = CAM_REQUEUE_REQ;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
		return (error);
	}

	ccbh->status |= CAM_SIM_QUEUED;
	/* Stash the request so an XPT_ABORT can find it from the CCB. */
	ccbh->ccbh_vtscsi_req = req;

	virtqueue_notify(vq);

	if (ccbh->timeout != CAM_TIME_INFINITY) {
		req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
		/* ccbh->timeout is in milliseconds. */
		callout_reset_sbt(&req->vsr_callout, SBT_1MS * ccbh->timeout,
		    0, vtscsi_timedout_scsi_cmd, req, 0);
	}

	vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
	    req, ccbh);

	return (0);
}
1098 
1099 static int
1100 vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
1101 {
1102 	struct vtscsi_request *req;
1103 	int error;
1104 
1105 	req = vtscsi_dequeue_request(sc);
1106 	if (req == NULL) {
1107 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
1108 		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
1109 		return (ENOBUFS);
1110 	}
1111 
1112 	req->vsr_ccb = ccb;
1113 
1114 	error = vtscsi_execute_scsi_cmd(sc, req);
1115 	if (error)
1116 		vtscsi_enqueue_request(sc, req);
1117 
1118 	return (error);
1119 }
1120 
/*
 * Completion handler for the ABORT_TASK TMF issued on behalf of a
 * timed out command. If the abort did not take effect and the timed
 * out request is still outstanding, fall back to a full bus reset.
 */
static void
vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	struct vtscsi_request *to_req;
	uint8_t response;

	tmf_resp = &req->vsr_tmf_resp;
	response = tmf_resp->response;
	to_req = req->vsr_timedout_req;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
	    req, to_req, response);

	/* The TMF request itself is done; recycle it before deciding. */
	vtscsi_enqueue_request(sc, req);

	/*
	 * The timedout request could have completed between when the
	 * abort task was sent and when the host processed it.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
		return;

	/* The timedout request was successfully aborted. */
	if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
		return;

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/* The timedout request will be aborted by the reset. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
		return;

	vtscsi_reset_bus(sc);
}
1159 
/*
 * Issue an ABORT_TASK TMF on the control virtqueue for a command that
 * timed out. The TMF completes asynchronously via
 * vtscsi_complete_abort_timedout_scsi_cmd().
 */
static int
vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *to_req)
{
	struct sglist *sg;
	struct ccb_hdr *to_ccbh;
	struct vtscsi_request *req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	to_ccbh = &to_req->vsr_ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = ENOBUFS;
		goto fail;
	}

	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* The timedout CCB's address doubles as the task tag to abort. */
	vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) to_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_timedout_req = to_req;
	req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
	/* Poison the response byte until the host writes it. */
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);
	if (error == 0)
		return (0);

	/* Submission failed; return the TMF request to the free list. */
	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
	    "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);

	return (error);
}
1207 
/*
 * Callout handler fired when a SCSI command exceeded its CAM timeout.
 * Attempts an ABORT_TASK TMF first; if that cannot be issued, the bus
 * is reset. Runs with the softc lock held (callout_init_mtx).
 */
static void
vtscsi_timedout_scsi_cmd(void *xreq)
{
	struct vtscsi_softc *sc;
	struct vtscsi_request *to_req;

	to_req = xreq;
	sc = to_req->vsr_softc;

	vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
	    to_req, to_req->vsr_ccb, to_req->vsr_state);

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/*
	 * Bail if the request is not in use. We likely raced when
	 * stopping the callout handler or it has already been aborted.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
	    (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
		return;

	/*
	 * Complete the request queue in case the timedout request is
	 * actually just pending.
	 */
	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
		return;

	sc->vtscsi_stats.scsi_cmd_timeouts++;
	to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;

	if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
		return;

	/* Could not even send the abort; escalate to a bus reset. */
	vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
	vtscsi_reset_bus(sc);
}
1249 
1250 static cam_status
1251 vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
1252 {
1253 	cam_status status;
1254 
1255 	switch (cmd_resp->response) {
1256 	case VIRTIO_SCSI_S_OK:
1257 		status = CAM_REQ_CMP;
1258 		break;
1259 	case VIRTIO_SCSI_S_OVERRUN:
1260 		status = CAM_DATA_RUN_ERR;
1261 		break;
1262 	case VIRTIO_SCSI_S_ABORTED:
1263 		status = CAM_REQ_ABORTED;
1264 		break;
1265 	case VIRTIO_SCSI_S_BAD_TARGET:
1266 		status = CAM_SEL_TIMEOUT;
1267 		break;
1268 	case VIRTIO_SCSI_S_RESET:
1269 		status = CAM_SCSI_BUS_RESET;
1270 		break;
1271 	case VIRTIO_SCSI_S_BUSY:
1272 		status = CAM_SCSI_BUSY;
1273 		break;
1274 	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
1275 	case VIRTIO_SCSI_S_TARGET_FAILURE:
1276 	case VIRTIO_SCSI_S_NEXUS_FAILURE:
1277 		status = CAM_SCSI_IT_NEXUS_LOST;
1278 		break;
1279 	default: /* VIRTIO_SCSI_S_FAILURE */
1280 		status = CAM_REQ_CMP_ERR;
1281 		break;
1282 	}
1283 
1284 	return (status);
1285 }
1286 
1287 static cam_status
1288 vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
1289     struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
1290 {
1291 	cam_status status;
1292 
1293 	csio->scsi_status = cmd_resp->status;
1294 	csio->resid = cmd_resp->resid;
1295 
1296 	if (csio->scsi_status == SCSI_STATUS_OK)
1297 		status = CAM_REQ_CMP;
1298 	else
1299 		status = CAM_SCSI_STATUS_ERROR;
1300 
1301 	if (cmd_resp->sense_len > 0) {
1302 		status |= CAM_AUTOSNS_VALID;
1303 
1304 		if (cmd_resp->sense_len < csio->sense_len)
1305 			csio->sense_resid = csio->sense_len -
1306 			    cmd_resp->sense_len;
1307 		else
1308 			csio->sense_resid = 0;
1309 
1310 		memcpy(&csio->sense_data, cmd_resp->sense,
1311 		    csio->sense_len - csio->sense_resid);
1312 	}
1313 
1314 	vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
1315 	    "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
1316 	    csio, csio->scsi_status, csio->resid, csio->sense_resid);
1317 
1318 	return (status);
1319 }
1320 
/*
 * Completion handler for a SCSI command: stop any pending timeout
 * callout, translate the VirtIO response into a CAM status, thaw the
 * SIMQ when possible, and hand the CCB back to CAM.
 */
static void
vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_resp *cmd_resp;
	cam_status status;

	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_resp = &req->vsr_cmd_resp;

	KASSERT(ccbh->ccbh_vtscsi_req == req,
	    ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&req->vsr_callout);

	status = vtscsi_scsi_cmd_cam_status(cmd_resp);
	if (status == CAM_REQ_ABORTED) {
		/* An abort we sent after a timeout reports as aborted. */
		if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
			status = CAM_CMD_TIMEOUT;
	} else if (status == CAM_REQ_CMP)
		status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccbh->path, 1);
	}

	/* Completing this request may free enough resources to thaw. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		status |= CAM_RELEASE_SIMQ;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
	    req, ccbh, status);

	ccbh->status = status;
	xpt_done(req->vsr_ccb);
	vtscsi_enqueue_request(sc, req);
}
1361 
1362 static void
1363 vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
1364 {
1365 
1366 	/* XXX We probably shouldn't poll forever. */
1367 	req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
1368 	do
1369 		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1370 	while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);
1371 
1372 	req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
1373 }
1374 
1375 static int
1376 vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
1377     struct sglist *sg, int readable, int writable, int flag)
1378 {
1379 	struct virtqueue *vq;
1380 	int error;
1381 
1382 	vq = sc->vtscsi_control_vq;
1383 
1384 	MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);
1385 
1386 	error = virtqueue_enqueue(vq, req, sg, readable, writable);
1387 	if (error) {
1388 		/*
1389 		 * Return EAGAIN when the virtqueue does not have enough
1390 		 * descriptors available.
1391 		 */
1392 		if (error == ENOSPC || error == EMSGSIZE)
1393 			error = EAGAIN;
1394 
1395 		return (error);
1396 	}
1397 
1398 	virtqueue_notify(vq);
1399 	if (flag == VTSCSI_EXECUTE_POLL)
1400 		vtscsi_poll_ctrl_req(sc, req);
1401 
1402 	return (0);
1403 }
1404 
1405 static void
1406 vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
1407     struct vtscsi_request *req)
1408 {
1409 	union ccb *ccb;
1410 	struct ccb_hdr *ccbh;
1411 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1412 
1413 	ccb = req->vsr_ccb;
1414 	ccbh = &ccb->ccb_h;
1415 	tmf_resp = &req->vsr_tmf_resp;
1416 
1417 	switch (tmf_resp->response) {
1418 	case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
1419 		ccbh->status = CAM_REQ_CMP;
1420 		break;
1421 	case VIRTIO_SCSI_S_FUNCTION_REJECTED:
1422 		ccbh->status = CAM_UA_ABORT;
1423 		break;
1424 	default:
1425 		ccbh->status = CAM_REQ_CMP_ERR;
1426 		break;
1427 	}
1428 
1429 	xpt_done(ccb);
1430 	vtscsi_enqueue_request(sc, req);
1431 }
1432 
/*
 * Handle an XPT_ABORT CCB by sending an ABORT_TASK TMF for the
 * referenced command, provided it could still be in flight.
 *
 * NOTE(review): if submitting the TMF fails, the target request has
 * already been moved to the ABORTED state and its callout stopped;
 * confirm the caller's error path accounts for this.
 */
static int
vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_abort *cab;
	struct ccb_hdr *ccbh;
	struct ccb_hdr *abort_ccbh;
	struct vtscsi_request *abort_req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	cab = &req->vsr_ccb->cab;
	ccbh = &cab->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* CCB header and request that's to be aborted. */
	abort_ccbh = &cab->abort_ccb->ccb_h;
	abort_req = abort_ccbh->ccbh_vtscsi_req;

	if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
		error = EINVAL;
		goto fail;
	}

	/* Only attempt to abort requests that could be in-flight. */
	if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
		error = EALREADY;
		goto fail;
	}

	abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
	if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&abort_req->vsr_callout);

	/* The aborted CCB's address is the task tag the host knows. */
	vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) abort_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_abort_task_cmd;
	/* Poison the response byte until the host writes it. */
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

fail:
	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
	    "abort_req=%p\n", error, req, abort_ccbh, abort_req);

	return (error);
}
1490 
1491 static void
1492 vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
1493     struct vtscsi_request *req)
1494 {
1495 	union ccb *ccb;
1496 	struct ccb_hdr *ccbh;
1497 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1498 
1499 	ccb = req->vsr_ccb;
1500 	ccbh = &ccb->ccb_h;
1501 	tmf_resp = &req->vsr_tmf_resp;
1502 
1503 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
1504 	    req, ccb, tmf_resp->response);
1505 
1506 	if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
1507 		ccbh->status = CAM_REQ_CMP;
1508 		vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
1509 		    ccbh->target_lun);
1510 	} else
1511 		ccbh->status = CAM_REQ_CMP_ERR;
1512 
1513 	xpt_done(ccb);
1514 	vtscsi_enqueue_request(sc, req);
1515 }
1516 
1517 static int
1518 vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
1519     struct vtscsi_request *req)
1520 {
1521 	struct sglist *sg;
1522 	struct ccb_resetdev *crd;
1523 	struct ccb_hdr *ccbh;
1524 	struct virtio_scsi_ctrl_tmf_req *tmf_req;
1525 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1526 	uint32_t subtype;
1527 	int error;
1528 
1529 	sg = sc->vtscsi_sglist;
1530 	crd = &req->vsr_ccb->crd;
1531 	ccbh = &crd->ccb_h;
1532 	tmf_req = &req->vsr_tmf_req;
1533 	tmf_resp = &req->vsr_tmf_resp;
1534 
1535 	if (ccbh->target_lun == CAM_LUN_WILDCARD)
1536 		subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
1537 	else
1538 		subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;
1539 
1540 	vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req);
1541 
1542 	sglist_reset(sg);
1543 	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
1544 	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
1545 
1546 	req->vsr_complete = vtscsi_complete_reset_dev_cmd;
1547 	tmf_resp->response = -1;
1548 
1549 	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
1550 	    VTSCSI_EXECUTE_ASYNC);
1551 
1552 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
1553 	    error, req, ccbh);
1554 
1555 	return (error);
1556 }
1557 
1558 static void
1559 vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
1560 {
1561 
1562 	*target_id = lun[1];
1563 	*lun_id = (lun[2] << 8) | lun[3];
1564 }
1565 
1566 static void
1567 vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
1568 {
1569 
1570 	lun[0] = 1;
1571 	lun[1] = ccbh->target_id;
1572 	lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
1573 	lun[3] = ccbh->target_lun & 0xFF;
1574 }
1575 
1576 static void
1577 vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio,
1578     struct virtio_scsi_cmd_req *cmd_req)
1579 {
1580 	uint8_t attr;
1581 
1582 	switch (csio->tag_action) {
1583 	case MSG_HEAD_OF_Q_TAG:
1584 		attr = VIRTIO_SCSI_S_HEAD;
1585 		break;
1586 	case MSG_ORDERED_Q_TAG:
1587 		attr = VIRTIO_SCSI_S_ORDERED;
1588 		break;
1589 	case MSG_ACA_TASK:
1590 		attr = VIRTIO_SCSI_S_ACA;
1591 		break;
1592 	default: /* MSG_SIMPLE_Q_TAG */
1593 		attr = VIRTIO_SCSI_S_SIMPLE;
1594 		break;
1595 	}
1596 
1597 	vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
1598 	cmd_req->tag = (uintptr_t) csio;
1599 	cmd_req->task_attr = attr;
1600 
1601 	memcpy(cmd_req->cdb,
1602 	    csio->ccb_h.flags & CAM_CDB_POINTER ?
1603 	        csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
1604 	    csio->cdb_len);
1605 }
1606 
1607 static void
1608 vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype,
1609     uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
1610 {
1611 
1612 	vtscsi_set_request_lun(ccbh, tmf_req->lun);
1613 
1614 	tmf_req->type = VIRTIO_SCSI_T_TMF;
1615 	tmf_req->subtype = subtype;
1616 	tmf_req->tag = tag;
1617 }
1618 
1619 static void
1620 vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
1621 {
1622 	int frozen;
1623 
1624 	frozen = sc->vtscsi_frozen;
1625 
1626 	if (reason & VTSCSI_REQUEST &&
1627 	    (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
1628 		sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;
1629 
1630 	if (reason & VTSCSI_REQUEST_VQ &&
1631 	    (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
1632 		sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;
1633 
1634 	/* Freeze the SIMQ if transitioned to frozen. */
1635 	if (frozen == 0 && sc->vtscsi_frozen != 0) {
1636 		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
1637 		xpt_freeze_simq(sc->vtscsi_sim, 1);
1638 	}
1639 }
1640 
1641 static int
1642 vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
1643 {
1644 	int thawed;
1645 
1646 	if (sc->vtscsi_frozen == 0 || reason == 0)
1647 		return (0);
1648 
1649 	if (reason & VTSCSI_REQUEST &&
1650 	    sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
1651 		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;
1652 
1653 	if (reason & VTSCSI_REQUEST_VQ &&
1654 	    sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
1655 		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;
1656 
1657 	thawed = sc->vtscsi_frozen == 0;
1658 	if (thawed != 0)
1659 		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");
1660 
1661 	return (thawed);
1662 }
1663 
1664 static void
1665 vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
1666     target_id_t target_id, lun_id_t lun_id)
1667 {
1668 	struct cam_path *path;
1669 
1670 	/* Use the wildcard path from our softc for bus announcements. */
1671 	if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
1672 		xpt_async(ac_code, sc->vtscsi_path, NULL);
1673 		return;
1674 	}
1675 
1676 	if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
1677 	    target_id, lun_id) != CAM_REQ_CMP) {
1678 		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
1679 		return;
1680 	}
1681 
1682 	xpt_async(ac_code, path, NULL);
1683 	xpt_free_path(path);
1684 }
1685 
1686 static void
1687 vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
1688     lun_id_t lun_id)
1689 {
1690 	union ccb *ccb;
1691 	cam_status status;
1692 
1693 	ccb = xpt_alloc_ccb_nowait();
1694 	if (ccb == NULL) {
1695 		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
1696 		return;
1697 	}
1698 
1699 	status = xpt_create_path(&ccb->ccb_h.path, NULL,
1700 	    cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
1701 	if (status != CAM_REQ_CMP) {
1702 		xpt_free_ccb(ccb);
1703 		return;
1704 	}
1705 
1706 	xpt_rescan(ccb);
1707 }
1708 
1709 static void
1710 vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
1711 {
1712 
1713 	vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
1714 }
1715 
1716 static void
1717 vtscsi_transport_reset_event(struct vtscsi_softc *sc,
1718     struct virtio_scsi_event *event)
1719 {
1720 	target_id_t target_id;
1721 	lun_id_t lun_id;
1722 
1723 	vtscsi_get_request_lun(event->lun, &target_id, &lun_id);
1724 
1725 	switch (event->reason) {
1726 	case VIRTIO_SCSI_EVT_RESET_RESCAN:
1727 	case VIRTIO_SCSI_EVT_RESET_REMOVED:
1728 		vtscsi_execute_rescan(sc, target_id, lun_id);
1729 		break;
1730 	default:
1731 		device_printf(sc->vtscsi_dev,
1732 		    "unhandled transport event reason: %d\n", event->reason);
1733 		break;
1734 	}
1735 }
1736 
1737 static void
1738 vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
1739 {
1740 	int error;
1741 
1742 	if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
1743 		switch (event->event) {
1744 		case VIRTIO_SCSI_T_TRANSPORT_RESET:
1745 			vtscsi_transport_reset_event(sc, event);
1746 			break;
1747 		default:
1748 			device_printf(sc->vtscsi_dev,
1749 			    "unhandled event: %d\n", event->event);
1750 			break;
1751 		}
1752 	} else
1753 		vtscsi_execute_rescan_bus(sc);
1754 
1755 	/*
1756 	 * This should always be successful since the buffer
1757 	 * was just dequeued.
1758 	 */
1759 	error = vtscsi_enqueue_event_buf(sc, event);
1760 	KASSERT(error == 0,
1761 	    ("cannot requeue event buffer: %d", error));
1762 }
1763 
1764 static int
1765 vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
1766     struct virtio_scsi_event *event)
1767 {
1768 	struct sglist *sg;
1769 	struct virtqueue *vq;
1770 	int size, error;
1771 
1772 	sg = sc->vtscsi_sglist;
1773 	vq = sc->vtscsi_event_vq;
1774 	size = sc->vtscsi_event_buf_size;
1775 
1776 	bzero(event, size);
1777 
1778 	sglist_reset(sg);
1779 	error = sglist_append(sg, event, size);
1780 	if (error)
1781 		return (error);
1782 
1783 	error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
1784 	if (error)
1785 		return (error);
1786 
1787 	virtqueue_notify(vq);
1788 
1789 	return (0);
1790 }
1791 
1792 static int
1793 vtscsi_init_event_vq(struct vtscsi_softc *sc)
1794 {
1795 	struct virtio_scsi_event *event;
1796 	int i, size, error;
1797 
1798 	/*
1799 	 * The first release of QEMU with VirtIO SCSI support would crash
1800 	 * when attempting to notify the event virtqueue. This was fixed
1801 	 * when hotplug support was added.
1802 	 */
1803 	if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
1804 		size = sc->vtscsi_event_buf_size;
1805 	else
1806 		size = 0;
1807 
1808 	if (size < sizeof(struct virtio_scsi_event))
1809 		return (0);
1810 
1811 	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
1812 		event = &sc->vtscsi_event_bufs[i];
1813 
1814 		error = vtscsi_enqueue_event_buf(sc, event);
1815 		if (error)
1816 			break;
1817 	}
1818 
1819 	/*
1820 	 * Even just one buffer is enough. Missed events are
1821 	 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
1822 	 */
1823 	if (i > 0)
1824 		error = 0;
1825 
1826 	return (error);
1827 }
1828 
1829 static void
1830 vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
1831 {
1832 	struct virtio_scsi_event *event;
1833 	int i, error;
1834 
1835 	if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
1836 	    sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
1837 		return;
1838 
1839 	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
1840 		event = &sc->vtscsi_event_bufs[i];
1841 
1842 		error = vtscsi_enqueue_event_buf(sc, event);
1843 		if (error)
1844 			break;
1845 	}
1846 
1847 	KASSERT(i > 0, ("cannot reinit event vq: %d", error));
1848 }
1849 
1850 static void
1851 vtscsi_drain_event_vq(struct vtscsi_softc *sc)
1852 {
1853 	struct virtqueue *vq;
1854 	int last;
1855 
1856 	vq = sc->vtscsi_event_vq;
1857 	last = 0;
1858 
1859 	while (virtqueue_drain(vq, &last) != NULL)
1860 		;
1861 
1862 	KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
1863 }
1864 
1865 static void
1866 vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
1867 {
1868 
1869 	VTSCSI_LOCK_OWNED(sc);
1870 
1871 	if (sc->vtscsi_request_vq != NULL)
1872 		vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
1873 	if (sc->vtscsi_control_vq != NULL)
1874 		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1875 }
1876 
/*
 * Locked wrapper around vtscsi_complete_vqs_locked().
 */
static void
vtscsi_complete_vqs(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);
	vtscsi_complete_vqs_locked(sc);
	VTSCSI_UNLOCK(sc);
}
1885 
/*
 * Fail a request pulled off a virtqueue during a drain: stop or drain
 * its timeout callout and complete its CCB so CAM either retries it
 * (bus reset) or gives up on it (detach).
 */
static void
vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	union ccb *ccb;
	int detach;

	ccb = req->vsr_ccb;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);

	/*
	 * The callout must be drained when detaching since the request is
	 * about to be freed. The VTSCSI_MTX must not be held for this in
	 * case the callout is pending because there is a deadlock potential.
	 * Otherwise, the virtqueue is being drained because of a bus reset
	 * so we only need to attempt to stop the callouts.
	 */
	detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
	if (detach != 0)
		VTSCSI_LOCK_NOTOWNED(sc);
	else
		VTSCSI_LOCK_OWNED(sc);

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
		if (detach != 0)
			callout_drain(&req->vsr_callout);
		else
			callout_stop(&req->vsr_callout);
	}

	if (ccb != NULL) {
		/* When detaching, the lock must be taken around xpt_done(). */
		if (detach != 0) {
			VTSCSI_LOCK(sc);
			ccb->ccb_h.status = CAM_NO_HBA;
		} else
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		if (detach != 0)
			VTSCSI_UNLOCK(sc);
	}

	vtscsi_enqueue_request(sc, req);
}
1929 
1930 static void
1931 vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
1932 {
1933 	struct vtscsi_request *req;
1934 	int last;
1935 
1936 	last = 0;
1937 
1938 	vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);
1939 
1940 	while ((req = virtqueue_drain(vq, &last)) != NULL)
1941 		vtscsi_cancel_request(sc, req);
1942 
1943 	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
1944 }
1945 
1946 static void
1947 vtscsi_drain_vqs(struct vtscsi_softc *sc)
1948 {
1949 
1950 	if (sc->vtscsi_control_vq != NULL)
1951 		vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
1952 	if (sc->vtscsi_request_vq != NULL)
1953 		vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
1954 	if (sc->vtscsi_event_vq != NULL)
1955 		vtscsi_drain_event_vq(sc);
1956 }
1957 
1958 static void
1959 vtscsi_stop(struct vtscsi_softc *sc)
1960 {
1961 
1962 	vtscsi_disable_vqs_intr(sc);
1963 	virtio_stop(sc->vtscsi_dev);
1964 }
1965 
/*
 * Reset the emulated SCSI bus: stop the device, complete and drain
 * every outstanding request (CAM requeues them), thaw the SIMQ, and
 * reinitialize the device. Called with the softc lock held.
 */
static int
vtscsi_reset_bus(struct vtscsi_softc *sc)
{
	int error;

	VTSCSI_LOCK_OWNED(sc);

	if (vtscsi_bus_reset_disable != 0) {
		device_printf(sc->vtscsi_dev, "bus reset disabled\n");
		return (0);
	}

	/* Completion paths check this flag to avoid recursive resets. */
	sc->vtscsi_flags |= VTSCSI_FLAG_RESET;

	/*
	 * vtscsi_stop() will cause the in-flight requests to be canceled.
	 * Those requests are then completed here so CAM will retry them
	 * after the reset is complete.
	 */
	vtscsi_stop(sc);
	vtscsi_complete_vqs_locked(sc);

	/* Rid the virtqueues of any remaining requests. */
	vtscsi_drain_vqs(sc);

	/*
	 * Any resource shortage that froze the SIMQ cannot persist across
	 * a bus reset so ensure it gets thawed here.
	 */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		xpt_release_simq(sc->vtscsi_sim, 0);

	error = vtscsi_reinit(sc);
	if (error) {
		device_printf(sc->vtscsi_dev,
		    "reinitialization failed, stopping device...\n");
		vtscsi_stop(sc);
	} else
		vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
		    CAM_LUN_WILDCARD);

	sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;

	return (error);
}
2011 
/*
 * One-time initialization of a request: record the owning softc and
 * attach the timeout callout to the softc lock. The INVARIANTS check
 * verifies the request/response unions each fit in one sglist segment
 * (i.e. do not straddle a page boundary).
 */
static void
vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

#ifdef INVARIANTS
	int req_nsegs, resp_nsegs;

	req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
	resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));

	KASSERT(req_nsegs == 1, ("request crossed page boundary"));
	KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
#endif

	req->vsr_softc = sc;
	callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
}
2029 
2030 static int
2031 vtscsi_alloc_requests(struct vtscsi_softc *sc)
2032 {
2033 	struct vtscsi_request *req;
2034 	int i, nreqs;
2035 
2036 	/*
2037 	 * Commands destined for either the request or control queues come
2038 	 * from the same SIM queue. Use the size of the request virtqueue
2039 	 * as it (should) be much more frequently used. Some additional
2040 	 * requests are allocated for internal (TMF) use.
2041 	 */
2042 	nreqs = virtqueue_size(sc->vtscsi_request_vq);
2043 	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
2044 		nreqs /= VTSCSI_MIN_SEGMENTS;
2045 	nreqs += VTSCSI_RESERVED_REQUESTS;
2046 
2047 	for (i = 0; i < nreqs; i++) {
2048 		req = malloc(sizeof(struct vtscsi_request), M_DEVBUF,
2049 		    M_NOWAIT);
2050 		if (req == NULL)
2051 			return (ENOMEM);
2052 
2053 		vtscsi_init_request(sc, req);
2054 
2055 		sc->vtscsi_nrequests++;
2056 		vtscsi_enqueue_request(sc, req);
2057 	}
2058 
2059 	return (0);
2060 }
2061 
2062 static void
2063 vtscsi_free_requests(struct vtscsi_softc *sc)
2064 {
2065 	struct vtscsi_request *req;
2066 
2067 	while ((req = vtscsi_dequeue_request(sc)) != NULL) {
2068 		KASSERT(callout_active(&req->vsr_callout) == 0,
2069 		    ("request callout still active"));
2070 
2071 		sc->vtscsi_nrequests--;
2072 		free(req, M_DEVBUF);
2073 	}
2074 
2075 	KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
2076 	    sc->vtscsi_nrequests));
2077 }
2078 
2079 static void
2080 vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
2081 {
2082 
2083 	KASSERT(req->vsr_softc == sc,
2084 	    ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));
2085 
2086 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
2087 
2088 	/* A request is available so the SIMQ could be released. */
2089 	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
2090 		xpt_release_simq(sc->vtscsi_sim, 1);
2091 
2092 	req->vsr_ccb = NULL;
2093 	req->vsr_complete = NULL;
2094 	req->vsr_ptr0 = NULL;
2095 	req->vsr_state = VTSCSI_REQ_STATE_FREE;
2096 	req->vsr_flags = 0;
2097 
2098 	bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
2099 	bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));
2100 
2101 	/*
2102 	 * We insert at the tail of the queue in order to make it
2103 	 * very unlikely a request will be reused if we race with
2104 	 * stopping its callout handler.
2105 	 */
2106 	TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
2107 }
2108 
2109 static struct vtscsi_request *
2110 vtscsi_dequeue_request(struct vtscsi_softc *sc)
2111 {
2112 	struct vtscsi_request *req;
2113 
2114 	req = TAILQ_FIRST(&sc->vtscsi_req_free);
2115 	if (req != NULL) {
2116 		req->vsr_state = VTSCSI_REQ_STATE_INUSE;
2117 		TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
2118 	} else
2119 		sc->vtscsi_stats.dequeue_no_requests++;
2120 
2121 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
2122 
2123 	return (req);
2124 }
2125 
2126 static void
2127 vtscsi_complete_request(struct vtscsi_request *req)
2128 {
2129 
2130 	if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
2131 		req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;
2132 
2133 	if (req->vsr_complete != NULL)
2134 		req->vsr_complete(req->vsr_softc, req);
2135 }
2136 
2137 static void
2138 vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
2139 {
2140 	struct vtscsi_request *req;
2141 
2142 	VTSCSI_LOCK_OWNED(sc);
2143 
2144 	while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
2145 		vtscsi_complete_request(req);
2146 }
2147 
2148 static void
2149 vtscsi_control_vq_intr(void *xsc)
2150 {
2151 	struct vtscsi_softc *sc;
2152 	struct virtqueue *vq;
2153 
2154 	sc = xsc;
2155 	vq = sc->vtscsi_control_vq;
2156 
2157 again:
2158 	VTSCSI_LOCK(sc);
2159 
2160 	vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
2161 
2162 	if (virtqueue_enable_intr(vq) != 0) {
2163 		virtqueue_disable_intr(vq);
2164 		VTSCSI_UNLOCK(sc);
2165 		goto again;
2166 	}
2167 
2168 	VTSCSI_UNLOCK(sc);
2169 }
2170 
2171 static void
2172 vtscsi_event_vq_intr(void *xsc)
2173 {
2174 	struct vtscsi_softc *sc;
2175 	struct virtqueue *vq;
2176 	struct virtio_scsi_event *event;
2177 
2178 	sc = xsc;
2179 	vq = sc->vtscsi_event_vq;
2180 
2181 again:
2182 	VTSCSI_LOCK(sc);
2183 
2184 	while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
2185 		vtscsi_handle_event(sc, event);
2186 
2187 	if (virtqueue_enable_intr(vq) != 0) {
2188 		virtqueue_disable_intr(vq);
2189 		VTSCSI_UNLOCK(sc);
2190 		goto again;
2191 	}
2192 
2193 	VTSCSI_UNLOCK(sc);
2194 }
2195 
2196 static void
2197 vtscsi_request_vq_intr(void *xsc)
2198 {
2199 	struct vtscsi_softc *sc;
2200 	struct virtqueue *vq;
2201 
2202 	sc = xsc;
2203 	vq = sc->vtscsi_request_vq;
2204 
2205 again:
2206 	VTSCSI_LOCK(sc);
2207 
2208 	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
2209 
2210 	if (virtqueue_enable_intr(vq) != 0) {
2211 		virtqueue_disable_intr(vq);
2212 		VTSCSI_UNLOCK(sc);
2213 		goto again;
2214 	}
2215 
2216 	VTSCSI_UNLOCK(sc);
2217 }
2218 
2219 static void
2220 vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
2221 {
2222 
2223 	virtqueue_disable_intr(sc->vtscsi_control_vq);
2224 	virtqueue_disable_intr(sc->vtscsi_event_vq);
2225 	virtqueue_disable_intr(sc->vtscsi_request_vq);
2226 }
2227 
2228 static void
2229 vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
2230 {
2231 
2232 	virtqueue_enable_intr(sc->vtscsi_control_vq);
2233 	virtqueue_enable_intr(sc->vtscsi_event_vq);
2234 	virtqueue_enable_intr(sc->vtscsi_request_vq);
2235 }
2236 
2237 static void
2238 vtscsi_get_tunables(struct vtscsi_softc *sc)
2239 {
2240 	char tmpstr[64];
2241 
2242 	TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);
2243 
2244 	snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
2245 	    device_get_unit(sc->vtscsi_dev));
2246 	TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
2247 }
2248 
2249 static void
2250 vtscsi_add_sysctl(struct vtscsi_softc *sc)
2251 {
2252 	device_t dev;
2253 	struct vtscsi_statistics *stats;
2254         struct sysctl_ctx_list *ctx;
2255 	struct sysctl_oid *tree;
2256 	struct sysctl_oid_list *child;
2257 
2258 	dev = sc->vtscsi_dev;
2259 	stats = &sc->vtscsi_stats;
2260 	ctx = device_get_sysctl_ctx(dev);
2261 	tree = device_get_sysctl_tree(dev);
2262 	child = SYSCTL_CHILDREN(tree);
2263 
2264 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
2265 	    CTLFLAG_RW, &sc->vtscsi_debug, 0,
2266 	    "Debug level");
2267 
2268 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
2269 	    CTLFLAG_RD, &stats->scsi_cmd_timeouts,
2270 	    "SCSI command timeouts");
2271 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
2272 	    CTLFLAG_RD, &stats->dequeue_no_requests,
2273 	    "No available requests to dequeue");
2274 }
2275 
2276 static void
2277 vtscsi_printf_req(struct vtscsi_request *req, const char *func,
2278     const char *fmt, ...)
2279 {
2280 	struct vtscsi_softc *sc;
2281 	union ccb *ccb;
2282 	struct sbuf sb;
2283 	va_list ap;
2284 	char str[192];
2285 	char path_str[64];
2286 
2287 	if (req == NULL)
2288 		return;
2289 
2290 	sc = req->vsr_softc;
2291 	ccb = req->vsr_ccb;
2292 
2293 	va_start(ap, fmt);
2294 	sbuf_new(&sb, str, sizeof(str), 0);
2295 
2296 	if (ccb == NULL) {
2297 		sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
2298 		    cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
2299 		    cam_sim_bus(sc->vtscsi_sim));
2300 	} else {
2301 		xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
2302 		sbuf_cat(&sb, path_str);
2303 		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
2304 			scsi_command_string(&ccb->csio, &sb);
2305 			sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
2306 		}
2307 	}
2308 
2309 	sbuf_vprintf(&sb, fmt, ap);
2310 	va_end(ap);
2311 
2312 	sbuf_finish(&sb);
2313 	printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
2314 	    sbuf_data(&sb));
2315 }
2316