xref: /freebsd/sys/dev/virtio/scsi/virtio_scsi.c (revision 1d386b48a555f61cb7325543adbbb5c3f3407a66)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /* Driver for VirtIO SCSI devices. */
30 
31 #include <sys/cdefs.h>
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/kthread.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/sglist.h>
39 #include <sys/sysctl.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/callout.h>
43 #include <sys/queue.h>
44 #include <sys/sbuf.h>
45 
46 #include <machine/stdarg.h>
47 
48 #include <machine/bus.h>
49 #include <machine/resource.h>
50 #include <sys/bus.h>
51 #include <sys/rman.h>
52 
53 #include <cam/cam.h>
54 #include <cam/cam_ccb.h>
55 #include <cam/cam_sim.h>
56 #include <cam/cam_periph.h>
57 #include <cam/cam_xpt_sim.h>
58 #include <cam/cam_debug.h>
59 #include <cam/scsi/scsi_all.h>
60 #include <cam/scsi/scsi_message.h>
61 
62 #include <dev/virtio/virtio.h>
63 #include <dev/virtio/virtqueue.h>
64 #include <dev/virtio/scsi/virtio_scsi.h>
65 #include <dev/virtio/scsi/virtio_scsivar.h>
66 
67 #include "virtio_if.h"
68 
69 static int	vtscsi_modevent(module_t, int, void *);
70 
71 static int	vtscsi_probe(device_t);
72 static int	vtscsi_attach(device_t);
73 static int	vtscsi_detach(device_t);
74 static int	vtscsi_suspend(device_t);
75 static int	vtscsi_resume(device_t);
76 
77 static int	vtscsi_negotiate_features(struct vtscsi_softc *);
78 static int	vtscsi_setup_features(struct vtscsi_softc *);
79 static void	vtscsi_read_config(struct vtscsi_softc *,
80 		    struct virtio_scsi_config *);
81 static int	vtscsi_maximum_segments(struct vtscsi_softc *, int);
82 static int	vtscsi_alloc_virtqueues(struct vtscsi_softc *);
83 static void	vtscsi_check_sizes(struct vtscsi_softc *);
84 static void	vtscsi_write_device_config(struct vtscsi_softc *);
85 static int	vtscsi_reinit(struct vtscsi_softc *);
86 
87 static int	vtscsi_alloc_cam(struct vtscsi_softc *);
88 static int	vtscsi_register_cam(struct vtscsi_softc *);
89 static void	vtscsi_free_cam(struct vtscsi_softc *);
90 static void	vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
91 static int	vtscsi_register_async(struct vtscsi_softc *);
92 static void	vtscsi_deregister_async(struct vtscsi_softc *);
93 static void	vtscsi_cam_action(struct cam_sim *, union ccb *);
94 static void	vtscsi_cam_poll(struct cam_sim *);
95 
96 static void	vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
97 		    union ccb *);
98 static void	vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
99 		    union ccb *);
100 static void	vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
101 static void	vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
102 static void	vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
103 static void	vtscsi_cam_path_inquiry(struct vtscsi_softc *,
104 		    struct cam_sim *, union ccb *);
105 
106 static int	vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
107 		    struct sglist *, struct ccb_scsiio *);
108 static int	vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
109 		    struct vtscsi_request *, int *, int *);
110 static int	vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
111 		    struct vtscsi_request *);
112 static int	vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
113 static void	vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
114 		    struct vtscsi_request *);
115 static int	vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
116 		    struct vtscsi_request *);
117 static void	vtscsi_timedout_scsi_cmd(void *);
118 static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
119 static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
120 		    struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
121 static void	vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
122 		    struct vtscsi_request *);
123 
124 static void	vtscsi_poll_ctrl_req(struct vtscsi_softc *,
125 		    struct vtscsi_request *);
126 static int	vtscsi_execute_ctrl_req(struct vtscsi_softc *,
127 		    struct vtscsi_request *, struct sglist *, int, int, int);
128 static void	vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c,
129 		    struct vtscsi_request *);
130 static int	vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
131 		    struct vtscsi_request *);
132 static int	vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
133 		    struct vtscsi_request *);
134 
135 static void	vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
136 static void	vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
137 static void	vtscsi_init_scsi_cmd_req(struct vtscsi_softc *,
138 		    struct ccb_scsiio *, struct virtio_scsi_cmd_req *);
139 static void	vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *, struct ccb_hdr *,
140 		    uint32_t, uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
141 
142 static void	vtscsi_freeze_simq(struct vtscsi_softc *, int);
143 static int	vtscsi_thaw_simq(struct vtscsi_softc *, int);
144 
145 static void	vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
146 		    lun_id_t);
147 static void	vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
148 		    lun_id_t);
149 static void	vtscsi_execute_rescan_bus(struct vtscsi_softc *);
150 
151 static void	vtscsi_handle_event(struct vtscsi_softc *,
152 		    struct virtio_scsi_event *);
153 static int	vtscsi_enqueue_event_buf(struct vtscsi_softc *,
154 		    struct virtio_scsi_event *);
155 static int	vtscsi_init_event_vq(struct vtscsi_softc *);
156 static void	vtscsi_reinit_event_vq(struct vtscsi_softc *);
157 static void	vtscsi_drain_event_vq(struct vtscsi_softc *);
158 
159 static void	vtscsi_complete_vqs_locked(struct vtscsi_softc *);
160 static void	vtscsi_complete_vqs(struct vtscsi_softc *);
161 static void	vtscsi_drain_vqs(struct vtscsi_softc *);
162 static void	vtscsi_cancel_request(struct vtscsi_softc *,
163 		    struct vtscsi_request *);
164 static void	vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
165 static void	vtscsi_stop(struct vtscsi_softc *);
166 static int	vtscsi_reset_bus(struct vtscsi_softc *);
167 
168 static void	vtscsi_init_request(struct vtscsi_softc *,
169 		    struct vtscsi_request *);
170 static int	vtscsi_alloc_requests(struct vtscsi_softc *);
171 static void	vtscsi_free_requests(struct vtscsi_softc *);
172 static void	vtscsi_enqueue_request(struct vtscsi_softc *,
173 		    struct vtscsi_request *);
174 static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);
175 
176 static void	vtscsi_complete_request(struct vtscsi_request *);
177 static void	vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);
178 
179 static void	vtscsi_control_vq_intr(void *);
180 static void	vtscsi_event_vq_intr(void *);
181 static void	vtscsi_request_vq_intr(void *);
182 static void	vtscsi_disable_vqs_intr(struct vtscsi_softc *);
183 static void	vtscsi_enable_vqs_intr(struct vtscsi_softc *);
184 
185 static void	vtscsi_get_tunables(struct vtscsi_softc *);
186 static void	vtscsi_setup_sysctl(struct vtscsi_softc *);
187 
188 static void	vtscsi_printf_req(struct vtscsi_request *, const char *,
189 		    const char *, ...);
190 
191 #define vtscsi_modern(_sc) (((_sc)->vtscsi_features & VIRTIO_F_VERSION_1) != 0)
192 #define vtscsi_htog16(_sc, _val)	virtio_htog16(vtscsi_modern(_sc), _val)
193 #define vtscsi_htog32(_sc, _val)	virtio_htog32(vtscsi_modern(_sc), _val)
194 #define vtscsi_htog64(_sc, _val)	virtio_htog64(vtscsi_modern(_sc), _val)
195 #define vtscsi_gtoh16(_sc, _val)	virtio_gtoh16(vtscsi_modern(_sc), _val)
196 #define vtscsi_gtoh32(_sc, _val)	virtio_gtoh32(vtscsi_modern(_sc), _val)
197 #define vtscsi_gtoh64(_sc, _val)	virtio_gtoh64(vtscsi_modern(_sc), _val)
198 
199 /* Global tunables. */
200 /*
201  * The current QEMU VirtIO SCSI implementation does not cancel in-flight
202  * IO during virtio_stop(). So in-flight requests still complete after the
203  * device reset. We would have to wait for all the in-flight IO to complete,
204  * which defeats the typical purpose of a bus reset. We could simulate the
205  * bus reset with either I_T_NEXUS_RESET of all the targets, or with
206  * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
207  * control virtqueue). But this isn't very useful if things really go off
208  * the rails, so default to disabled for now.
209  */
/* Settable via the loader tunable "hw.vtscsi.bus_reset_disable"; see above. */
static int vtscsi_bus_reset_disable = 1;
TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);
212 
/*
 * Human-readable names for the VirtIO SCSI feature bits; registered with
 * the bus in vtscsi_attach() via virtio_set_feature_desc().
 */
static struct virtio_feature_desc vtscsi_feature_desc[] = {
	{ VIRTIO_SCSI_F_INOUT,		"InOut"		},
	{ VIRTIO_SCSI_F_HOTPLUG,	"Hotplug"	},
	{ VIRTIO_SCSI_F_CHANGE,		"ChangeEvent"	},
	{ VIRTIO_SCSI_F_T10_PI, 	"T10PI"		},

	{ 0, NULL }
};
221 
/* newbus device method table for the vtscsi driver. */
static device_method_t vtscsi_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtscsi_probe),
	DEVMETHOD(device_attach,	vtscsi_attach),
	DEVMETHOD(device_detach,	vtscsi_detach),
	DEVMETHOD(device_suspend,	vtscsi_suspend),
	DEVMETHOD(device_resume,	vtscsi_resume),

	DEVMETHOD_END
};
232 
/* Driver declaration: name, methods, and per-instance softc size. */
static driver_t vtscsi_driver = {
	"vtscsi",
	vtscsi_methods,
	sizeof(struct vtscsi_softc)
};
238 
/* Module registration, version, and dependencies on virtio and CAM. */
VIRTIO_DRIVER_MODULE(virtio_scsi, vtscsi_driver, vtscsi_modevent, NULL);
MODULE_VERSION(virtio_scsi, 1);
MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);

VIRTIO_SIMPLE_PNPINFO(virtio_scsi, VIRTIO_ID_SCSI, "VirtIO SCSI Adapter");
245 
246 static int
247 vtscsi_modevent(module_t mod, int type, void *unused)
248 {
249 	int error;
250 
251 	switch (type) {
252 	case MOD_LOAD:
253 	case MOD_QUIESCE:
254 	case MOD_UNLOAD:
255 	case MOD_SHUTDOWN:
256 		error = 0;
257 		break;
258 	default:
259 		error = EOPNOTSUPP;
260 		break;
261 	}
262 
263 	return (error);
264 }
265 
/* Probe: match the VirtIO SCSI device ID via the simple-probe helper. */
static int
vtscsi_probe(device_t dev)
{
	return (VIRTIO_SIMPLE_PROBE(dev, virtio_scsi));
}
271 
/*
 * Attach: negotiate features, read the device configuration, allocate the
 * sglist, virtqueues, requests, and CAM structures, set up interrupts, and
 * finally register with CAM.  On any failure, vtscsi_detach() tears down
 * whatever was set up.  Returns 0 on success or an errno value.
 */
static int
vtscsi_attach(device_t dev)
{
	struct vtscsi_softc *sc;
	struct virtio_scsi_config scsicfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtscsi_dev = dev;
	virtio_set_feature_desc(dev, vtscsi_feature_desc);

	VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
	TAILQ_INIT(&sc->vtscsi_req_free);

	vtscsi_get_tunables(sc);
	vtscsi_setup_sysctl(sc);

	error = vtscsi_setup_features(sc);
	if (error) {
		device_printf(dev, "cannot setup features\n");
		goto fail;
	}

	vtscsi_read_config(sc, &scsicfg);

	/* Cache the topology limits advertised by the host. */
	sc->vtscsi_max_channel = scsicfg.max_channel;
	sc->vtscsi_max_target = scsicfg.max_target;
	sc->vtscsi_max_lun = scsicfg.max_lun;
	sc->vtscsi_event_buf_size = scsicfg.event_info_size;

	vtscsi_write_device_config(sc);

	sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
	sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
	if (sc->vtscsi_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtscsi_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	/* May clamp vtscsi_max_nsegs if the host reported a bad seg_max. */
	vtscsi_check_sizes(sc);

	error = vtscsi_init_event_vq(sc);
	if (error) {
		device_printf(dev, "cannot populate the eventvq\n");
		goto fail;
	}

	error = vtscsi_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot allocate requests\n");
		goto fail;
	}

	error = vtscsi_alloc_cam(sc);
	if (error) {
		device_printf(dev, "cannot allocate CAM structures\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_CAM);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		goto fail;
	}

	vtscsi_enable_vqs_intr(sc);

	/*
	 * Register with CAM after interrupts are enabled so we will get
	 * notified of the probe responses.
	 */
	error = vtscsi_register_cam(sc);
	if (error) {
		device_printf(dev, "cannot register with CAM\n");
		goto fail;
	}

fail:
	/* The success path also reaches here with error == 0. */
	if (error)
		vtscsi_detach(dev);

	return (error);
}
362 
/*
 * Detach: stop the device, complete and drain all virtqueues, and release
 * the CAM state, requests, and sglist.  Also used by vtscsi_attach() to
 * unwind a partial attach, so every teardown step tolerates NULL/unset
 * state.  Always returns 0.
 */
static int
vtscsi_detach(device_t dev)
{
	struct vtscsi_softc *sc;

	sc = device_get_softc(dev);

	VTSCSI_LOCK(sc);
	/* Flag detach so vtscsi_cam_action() rejects CCBs racing with us. */
	sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
	if (device_is_attached(dev))
		vtscsi_stop(sc);
	VTSCSI_UNLOCK(sc);

	vtscsi_complete_vqs(sc);
	vtscsi_drain_vqs(sc);

	vtscsi_free_cam(sc);
	vtscsi_free_requests(sc);

	if (sc->vtscsi_sglist != NULL) {
		sglist_free(sc->vtscsi_sglist);
		sc->vtscsi_sglist = NULL;
	}

	VTSCSI_LOCK_DESTROY(sc);

	return (0);
}
391 
/* Suspend: nothing to save here; always succeeds. */
static int
vtscsi_suspend(device_t dev)
{

	return (0);
}
398 
/* Resume: no-op counterpart to vtscsi_suspend(); always succeeds. */
static int
vtscsi_resume(device_t dev)
{

	return (0);
}
405 
/*
 * Offer VTSCSI_FEATURES to the host, record the negotiated subset in
 * vtscsi_features, and finalize the negotiation with the transport.
 * Returns the result of virtio_finalize_features().
 */
static int
vtscsi_negotiate_features(struct vtscsi_softc *sc)
{
	device_t dev;
	uint64_t features;

	dev = sc->vtscsi_dev;
	features = VTSCSI_FEATURES;

	sc->vtscsi_features = virtio_negotiate_features(dev, features);
	return (virtio_finalize_features(dev));
}
418 
/*
 * Negotiate features and translate the negotiated bits into driver flags:
 * indirect descriptors, bidirectional (in+out) transfers, and hotplug
 * event support.  Returns 0 or the negotiation error.
 */
static int
vtscsi_setup_features(struct vtscsi_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtscsi_dev;

	error = vtscsi_negotiate_features(sc);
	if (error)
		return (error);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;

	return (0);
}
440 
/* Read one field of struct virtio_scsi_config from device config space. */
#define VTSCSI_GET_CONFIG(_dev, _field, _cfg)			\
	virtio_read_device_config(_dev,				\
	    offsetof(struct virtio_scsi_config, _field),	\
	    &(_cfg)->_field, sizeof((_cfg)->_field))		\

/*
 * Populate *scsicfg with the full device configuration, field by field.
 * Unread fields are zeroed first.
 */
static void
vtscsi_read_config(struct vtscsi_softc *sc,
    struct virtio_scsi_config *scsicfg)
{
	device_t dev;

	dev = sc->vtscsi_dev;

	bzero(scsicfg, sizeof(struct virtio_scsi_config));

	VTSCSI_GET_CONFIG(dev, num_queues, scsicfg);
	VTSCSI_GET_CONFIG(dev, seg_max, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_sectors, scsicfg);
	VTSCSI_GET_CONFIG(dev, cmd_per_lun, scsicfg);
	VTSCSI_GET_CONFIG(dev, event_info_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, sense_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, cdb_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_channel, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_target, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_lun, scsicfg);
}

#undef VTSCSI_GET_CONFIG
469 
470 static int
471 vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
472 {
473 	int nsegs;
474 
475 	nsegs = VTSCSI_MIN_SEGMENTS;
476 
477 	if (seg_max > 0) {
478 		nsegs += MIN(seg_max, maxphys / PAGE_SIZE + 1);
479 		if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
480 			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
481 	} else
482 		nsegs += 1;
483 
484 	return (nsegs);
485 }
486 
/*
 * Allocate the three virtqueues in the order mandated by the VirtIO SCSI
 * spec: control, event, request.  Only the request queue needs indirect
 * segments (vtscsi_max_nsegs); control and event use direct descriptors.
 */
static int
vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs;

	dev = sc->vtscsi_dev;
	nvqs = 3;

	VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
	    &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
	    &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
	    vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}
509 
/*
 * Without indirect descriptors, a request cannot span more segments than
 * the request virtqueue has entries; clamp vtscsi_max_nsegs accordingly.
 */
static void
vtscsi_check_sizes(struct vtscsi_softc *sc)
{
	int rqsize;

	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) {
		/*
		 * Ensure the assertions in virtqueue_enqueue(),
		 * even if the hypervisor reports a bad seg_max.
		 */
		rqsize = virtqueue_size(sc->vtscsi_request_vq);
		if (sc->vtscsi_max_nsegs > rqsize) {
			device_printf(sc->vtscsi_dev,
			    "clamping seg_max (%d %d)\n", sc->vtscsi_max_nsegs,
			    rqsize);
			sc->vtscsi_max_nsegs = rqsize;
		}
	}
}
529 
/*
 * Tell the device the sense buffer and CDB sizes this driver uses, by
 * writing the writable sense_size and cdb_size config fields.
 */
static void
vtscsi_write_device_config(struct vtscsi_softc *sc)
{

	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, sense_size),
	    VIRTIO_SCSI_SENSE_SIZE);

	/*
	 * This is the size in the virtio_scsi_cmd_req structure. Note
	 * this value (32) is larger than the maximum CAM CDB size (16).
	 */
	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, cdb_size),
	    VIRTIO_SCSI_CDB_SIZE);
}
546 
/*
 * Reinitialize the device after a reset: renegotiate the previously
 * accepted features, rewrite the device config, repopulate the event
 * virtqueue, and re-enable interrupts.  Returns the virtio_reinit() error.
 */
static int
vtscsi_reinit(struct vtscsi_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtscsi_dev;

	error = virtio_reinit(dev, sc->vtscsi_features);
	if (error == 0) {
		vtscsi_write_device_config(sc);
		virtio_reinit_complete(dev);
		vtscsi_reinit_event_vq(sc);

		vtscsi_enable_vqs_intr(sc);
	}

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);

	return (error);
}
568 
/*
 * Allocate the CAM SIM and its device queue.  The number of openings is
 * the request pool size minus the requests reserved for task management
 * commands.  Returns 0 or ENOMEM.
 */
static int
vtscsi_alloc_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	struct cam_devq *devq;
	int openings;

	dev = sc->vtscsi_dev;
	openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;

	devq = cam_simq_alloc(openings);
	if (devq == NULL) {
		device_printf(dev, "cannot allocate SIM queue\n");
		return (ENOMEM);
	}

	/* The SIM takes ownership of devq; it is freed on failure below. */
	sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
	    "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
	    openings, devq);
	if (sc->vtscsi_sim == NULL) {
		cam_simq_free(devq);
		device_printf(dev, "cannot allocate SIM\n");
		return (ENOMEM);
	}

	return (0);
}
596 
/*
 * Register the SIM with the XPT layer, create the wildcard bus path, and
 * subscribe to async events.  On failure the steps already taken are
 * unwound in reverse order.  Called with the lock not held; returns 0 or
 * an errno value.
 */
static int
vtscsi_register_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	int registered, error;

	dev = sc->vtscsi_dev;
	registered = 0;

	VTSCSI_LOCK(sc);

	if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) {
		error = ENOMEM;
		device_printf(dev, "cannot register XPT bus\n");
		goto fail;
	}

	registered = 1;

	if (xpt_create_path(&sc->vtscsi_path, NULL,
	    cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		error = ENOMEM;
		device_printf(dev, "cannot create bus path\n");
		goto fail;
	}

	if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
		error = EIO;
		device_printf(dev, "cannot register async callback\n");
		goto fail;
	}

	VTSCSI_UNLOCK(sc);

	return (0);

fail:
	/* Unwind: free the path (if created), then deregister the bus. */
	if (sc->vtscsi_path != NULL) {
		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;
	}

	if (registered != 0)
		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));

	VTSCSI_UNLOCK(sc);

	return (error);
}
647 
/*
 * Tear down the CAM registration: deregister async callbacks, free the
 * bus path, deregister the bus, and free the SIM.  Safe to call on a
 * partially attached instance (all steps are guarded by NULL checks).
 */
static void
vtscsi_free_cam(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);

	if (sc->vtscsi_path != NULL) {
		vtscsi_deregister_async(sc);

		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;

		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
	}

	if (sc->vtscsi_sim != NULL) {
		cam_sim_free(sc->vtscsi_sim, 1);
		sc->vtscsi_sim = NULL;
	}

	VTSCSI_UNLOCK(sc);
}
670 
/*
 * CAM async event callback.  Currently only traces the event; both
 * handled codes are intentional no-ops pending QEMU event support.
 */
static void
vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct vtscsi_softc *sc;

	sim = cb_arg;
	sc = cam_sim_softc(sim);

	vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);

	/*
	 * TODO Once QEMU supports event reporting, we should
	 *      (un)subscribe to events here.
	 */
	switch (code) {
	case AC_FOUND_DEVICE:
		break;
	case AC_LOST_DEVICE:
		break;
	}
}
693 
694 static int
695 vtscsi_register_async(struct vtscsi_softc *sc)
696 {
697 	struct ccb_setasync csa;
698 
699 	memset(&csa, 0, sizeof(csa));
700 	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
701 	csa.ccb_h.func_code = XPT_SASYNC_CB;
702 	csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
703 	csa.callback = vtscsi_cam_async;
704 	csa.callback_arg = sc->vtscsi_sim;
705 
706 	xpt_action((union ccb *) &csa);
707 
708 	return (csa.ccb_h.status);
709 }
710 
/*
 * Cancel the async event subscription made by vtscsi_register_async()
 * by re-registering the same callback with an empty event mask.
 */
static void
vtscsi_deregister_async(struct vtscsi_softc *sc)
{
	struct ccb_setasync csa;

	memset(&csa, 0, sizeof(csa));
	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = vtscsi_cam_async;
	csa.callback_arg = sc->vtscsi_sim;

	xpt_action((union ccb *) &csa);
}
725 
/*
 * SIM action entry point: dispatch an incoming CCB by function code.
 * Called with the SIM lock (VTSCSI_MTX) held.  CCBs arriving while the
 * driver is detaching are completed immediately with CAM_NO_HBA.
 */
static void
vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct vtscsi_softc *sc;
	struct ccb_hdr *ccbh;

	sc = cam_sim_softc(sim);
	ccbh = &ccb->ccb_h;

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
		/*
		 * The VTSCSI_MTX is briefly dropped between setting
		 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
		 * drop any CCBs that come in during that window.
		 */
		ccbh->status = CAM_NO_HBA;
		xpt_done(ccb);
		return;
	}

	switch (ccbh->func_code) {
	case XPT_SCSI_IO:
		vtscsi_cam_scsi_io(sc, sim, ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* Transport settings are fixed; setting them is unsupported. */
		ccbh->status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
		vtscsi_cam_get_tran_settings(sc, ccb);
		break;

	case XPT_RESET_BUS:
		vtscsi_cam_reset_bus(sc, ccb);
		break;

	case XPT_RESET_DEV:
		vtscsi_cam_reset_dev(sc, ccb);
		break;

	case XPT_ABORT:
		vtscsi_cam_abort(sc, ccb);
		break;

	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:
		vtscsi_cam_path_inquiry(sc, sim, ccb);
		break;

	default:
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);

		ccbh->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
792 
/*
 * SIM poll entry point: service all virtqueue completions without
 * relying on interrupts (used during kernel dumps and polled I/O).
 */
static void
vtscsi_cam_poll(struct cam_sim *sim)
{

	vtscsi_complete_vqs_locked(cam_sim_softc(sim));
}
802 
/*
 * Handle an XPT_SCSI_IO CCB: validate the CDB length and direction
 * against the negotiated features, then submit the command.  On any
 * error the CCB is completed here; on success completion happens later
 * from the request virtqueue interrupt path.
 */
static void
vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	int error;

	ccbh = &ccb->ccb_h;
	csio = &ccb->csio;

	/* The CDB must fit in the fixed-size virtio request field. */
	if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
		error = EINVAL;
		ccbh->status = CAM_REQ_INVALID;
		goto done;
	}

	/* Bidirectional transfers require the negotiated INOUT feature. */
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
	    (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
		error = EINVAL;
		ccbh->status = CAM_REQ_INVALID;
		goto done;
	}

	error = vtscsi_start_scsi_cmd(sc, ccb);

done:
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
		xpt_done(ccb);
	}
}
836 
837 static void
838 vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
839 {
840 	struct ccb_trans_settings *cts;
841 	struct ccb_trans_settings_scsi *scsi;
842 
843 	cts = &ccb->cts;
844 	scsi = &cts->proto_specific.scsi;
845 
846 	cts->protocol = PROTO_SCSI;
847 	cts->protocol_version = SCSI_REV_SPC3;
848 	cts->transport = XPORT_SAS;
849 	cts->transport_version = 0;
850 
851 	scsi->valid = CTS_SCSI_VALID_TQ;
852 	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
853 
854 	ccb->ccb_h.status = CAM_REQ_CMP;
855 	xpt_done(ccb);
856 }
857 
858 static void
859 vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
860 {
861 	int error;
862 
863 	error = vtscsi_reset_bus(sc);
864 	if (error == 0)
865 		ccb->ccb_h.status = CAM_REQ_CMP;
866 	else
867 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
868 
869 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
870 	    error, ccb, ccb->ccb_h.status);
871 
872 	xpt_done(ccb);
873 }
874 
/*
 * Handle an XPT_RESET_DEV CCB: take a request from the free pool and
 * issue a LUN reset task-management command.  On success the CCB is
 * completed later by the control virtqueue; otherwise it is completed
 * here with CAM_RESRC_UNAVAIL (no request available; SIM queue frozen)
 * or CAM_REQ_CMP_ERR.
 */
static void
vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct vtscsi_request *req;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_reset_dev_cmd(sc, req);
	if (error == 0)
		return;

	/* Command was never enqueued; return the request to the pool. */
	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
910 
/*
 * Handle an XPT_ABORT CCB: take a request from the free pool and issue
 * an abort-task task-management command.  Mirrors the error handling of
 * vtscsi_cam_reset_dev(): CAM_RESRC_UNAVAIL when no request is available
 * (SIM queue frozen), CAM_REQ_CMP_ERR on other failures.
 */
static void
vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct vtscsi_request *req;
	struct ccb_hdr *ccbh;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_abort_task_cmd(sc, req);
	if (error == 0)
		return;

	/* Command was never enqueued; return the request to the pool. */
	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
946 
/*
 * Handle an XPT_PATH_INQ CCB: describe the HBA's capabilities and limits
 * to CAM.  maxio is derived from the usable data segments per request
 * (total minus the reserved header/response segments, minus one for a
 * possibly unaligned first page).
 */
static void
vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	device_t dev;
	struct ccb_pathinq *cpi;

	dev = sc->vtscsi_dev;
	cpi = &ccb->cpi;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);

	cpi->version_num = 1;
	cpi->hba_inquiry = PI_TAG_ABLE;
	cpi->target_sprt = 0;
	cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
	if (vtscsi_bus_reset_disable != 0)
		cpi->hba_misc |= PIM_NOBUSRESET;
	cpi->hba_eng_cnt = 0;

	cpi->max_target = sc->vtscsi_max_target;
	cpi->max_lun = sc->vtscsi_max_lun;
	/* Claim an initiator ID outside the valid target range. */
	cpi->initiator_id = cpi->max_target + 1;

	strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
	strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);

	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);

	cpi->base_transfer_speed = 300000;

	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC3;
	cpi->transport = XPORT_SAS;
	cpi->transport_version = 0;

	cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;

	cpi->hba_vendor = virtio_get_vendor(dev);
	cpi->hba_device = virtio_get_device(dev);
	cpi->hba_subvendor = virtio_get_subvendor(dev);
	cpi->hba_subdevice = virtio_get_subdevice(dev);

	ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
}
996 
/*
 * Append the CCB's data buffer(s) to the sglist, handling every CAM data
 * transfer representation: virtual address, physical address, S/G lists
 * of either, and bio.  Returns 0 or an errno from sglist_append*()
 * (EINVAL for an unrecognized representation).
 */
static int
vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
    struct ccb_scsiio *csio)
{
	struct ccb_hdr *ccbh;
	struct bus_dma_segment *dseg;
	int i, error;

	ccbh = &csio->ccb_h;
	error = 0;

	switch ((ccbh->flags & CAM_DATA_MASK)) {
	case CAM_DATA_VADDR:
		error = sglist_append(sg, csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_PADDR:
		error = sglist_append_phys(sg,
		    (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_SG:
		/* data_ptr is an array of sglist_cnt virtual-address segments. */
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append(sg,
			    (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	case CAM_DATA_SG_PADDR:
		/* As above, but the segments hold physical addresses. */
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append_phys(sg,
			    (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	case CAM_DATA_BIO:
		error = sglist_append_bio(sg, (struct bio *) csio->data_ptr);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
1040 
/*
 * Build the shared sglist for a SCSI command in virtio order: the
 * device-readable part (request header, then write data for OUT
 * transfers) followed by the device-writable part (response, then read
 * data for IN transfers).  *readable/*writable receive the segment
 * counts of each part.  Returns 0, or EFBIG with the CCB status set to
 * CAM_REQ_TOO_BIG when the data does not fit.
 */
static int
vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
    int *readable, int *writable)
{
	struct sglist *sg;
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int error;

	sg = sc->vtscsi_sglist;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	sglist_reset(sg);

	/* Header appends cannot fail on a freshly reset sglist. */
	sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		/* At least one segment must be left for the response. */
		if (error || sg->sg_nseg == sg->sg_maxseg)
			goto fail;
	}

	*readable = sg->sg_nseg;

	sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		if (error)
			goto fail;
	}

	*writable = sg->sg_nseg - *readable;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
	    "writable=%d\n", req, ccbh, *readable, *writable);

	return (0);

fail:
	/*
	 * This should never happen unless maxio was incorrectly set.
	 */
	vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);

	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
	    "nseg=%d maxseg=%d\n",
	    error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);

	return (EFBIG);
}
1096 
/*
 * Enqueue a SCSI command on the request virtqueue and notify the host.
 * On success the request is in flight and will be finished later by
 * vtscsi_complete_scsi_cmd(); on failure the CCB status is set so the
 * caller can requeue the command.
 */
static int
vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct sglist *sg;
	struct virtqueue *vq;
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int readable, writable, error;

	sg = sc->vtscsi_sglist;
	vq = sc->vtscsi_request_vq;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	vtscsi_init_scsi_cmd_req(sc, csio, cmd_req);

	error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
	if (error)
		return (error);

	req->vsr_complete = vtscsi_complete_scsi_cmd;
	/* Preset an invalid response value; -1 maps to CAM_REQ_CMP_ERR. */
	cmd_resp->response = -1;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);

		/* Virtqueue full: freeze until completions free slots. */
		ccbh->status = CAM_REQUEUE_REQ;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
		return (error);
	}

	ccbh->status |= CAM_SIM_QUEUED;
	/* Link the request so an XPT_ABORT can locate it later. */
	ccbh->ccbh_vtscsi_req = req;

	virtqueue_notify(vq);

	if (ccbh->timeout != CAM_TIME_INFINITY) {
		req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
		/* The CCB timeout is in milliseconds. */
		callout_reset_sbt(&req->vsr_callout, SBT_1MS * ccbh->timeout,
		    0, vtscsi_timedout_scsi_cmd, req, 0);
	}

	vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
	    req, ccbh);

	return (0);
}
1150 
1151 static int
1152 vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
1153 {
1154 	struct vtscsi_request *req;
1155 	int error;
1156 
1157 	req = vtscsi_dequeue_request(sc);
1158 	if (req == NULL) {
1159 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
1160 		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
1161 		return (ENOBUFS);
1162 	}
1163 
1164 	req->vsr_ccb = ccb;
1165 
1166 	error = vtscsi_execute_scsi_cmd(sc, req);
1167 	if (error)
1168 		vtscsi_enqueue_request(sc, req);
1169 
1170 	return (error);
1171 }
1172 
/*
 * Completion handler for the ABORT_TASK TMF issued on behalf of a
 * timed-out command. If the abort did not succeed and the device is
 * neither detaching nor already resetting, fall back to a bus reset.
 */
static void
vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	struct vtscsi_request *to_req;
	uint8_t response;

	tmf_resp = &req->vsr_tmf_resp;
	response = tmf_resp->response;
	to_req = req->vsr_timedout_req;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
	    req, to_req, response);

	/* The TMF request itself is done; return it to the pool. */
	vtscsi_enqueue_request(sc, req);

	/*
	 * The timedout request could have completed between when the
	 * abort task was sent and when the host processed it.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
		return;

	/* The timedout request was successfully aborted. */
	if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
		return;

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/* The timedout request will be aborted by the reset. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
		return;

	vtscsi_reset_bus(sc);
}
1211 
/*
 * Issue an asynchronous ABORT_TASK TMF for a command whose timeout
 * expired. The tag is the timed-out command's CCB header pointer,
 * matching the tag assigned in vtscsi_init_scsi_cmd_req(). Returns 0
 * if the TMF was queued; the caller resets the bus on failure.
 */
static int
vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *to_req)
{
	struct sglist *sg;
	struct ccb_hdr *to_ccbh;
	struct vtscsi_request *req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	to_ccbh = &to_req->vsr_ccb->ccb_h;

	/* A separate request carries the TMF on the control virtqueue. */
	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = ENOBUFS;
		goto fail;
	}

	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	vtscsi_init_ctrl_tmf_req(sc, to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) to_ccbh, tmf_req);

	/* One readable segment (request), one writable (response). */
	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_timedout_req = to_req;
	req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);
	if (error == 0)
		return (0);

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
	    "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);

	return (error);
}
1259 
/*
 * Callout handler fired when a SCSI command's timeout expires. Runs
 * with the softc mutex held (the callout was initialized with
 * callout_init_mtx() in vtscsi_init_request()). Tries to abort the
 * command via a TMF, and resets the bus if the abort cannot be sent.
 */
static void
vtscsi_timedout_scsi_cmd(void *xreq)
{
	struct vtscsi_softc *sc;
	struct vtscsi_request *to_req;

	to_req = xreq;
	sc = to_req->vsr_softc;

	vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
	    to_req, to_req->vsr_ccb, to_req->vsr_state);

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/*
	 * Bail if the request is not in use. We likely raced when
	 * stopping the callout handler or it has already been aborted.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
	    (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
		return;

	/*
	 * Complete the request queue in case the timedout request is
	 * actually just pending.
	 */
	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
		return;

	sc->vtscsi_stats.scsi_cmd_timeouts++;
	/* Mark it so the abort completion can tell if it finished early. */
	to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;

	if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
		return;

	vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
	vtscsi_reset_bus(sc);
}
1301 
1302 static cam_status
1303 vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
1304 {
1305 	cam_status status;
1306 
1307 	switch (cmd_resp->response) {
1308 	case VIRTIO_SCSI_S_OK:
1309 		status = CAM_REQ_CMP;
1310 		break;
1311 	case VIRTIO_SCSI_S_OVERRUN:
1312 		status = CAM_DATA_RUN_ERR;
1313 		break;
1314 	case VIRTIO_SCSI_S_ABORTED:
1315 		status = CAM_REQ_ABORTED;
1316 		break;
1317 	case VIRTIO_SCSI_S_BAD_TARGET:
1318 		status = CAM_SEL_TIMEOUT;
1319 		break;
1320 	case VIRTIO_SCSI_S_RESET:
1321 		status = CAM_SCSI_BUS_RESET;
1322 		break;
1323 	case VIRTIO_SCSI_S_BUSY:
1324 		status = CAM_SCSI_BUSY;
1325 		break;
1326 	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
1327 	case VIRTIO_SCSI_S_TARGET_FAILURE:
1328 	case VIRTIO_SCSI_S_NEXUS_FAILURE:
1329 		status = CAM_SCSI_IT_NEXUS_LOST;
1330 		break;
1331 	default: /* VIRTIO_SCSI_S_FAILURE */
1332 		status = CAM_REQ_CMP_ERR;
1333 		break;
1334 	}
1335 
1336 	return (status);
1337 }
1338 
1339 static cam_status
1340 vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
1341     struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
1342 {
1343 	uint32_t resp_sense_length;
1344 	cam_status status;
1345 
1346 	csio->scsi_status = cmd_resp->status;
1347 	csio->resid = vtscsi_htog32(sc, cmd_resp->resid);
1348 
1349 	if (csio->scsi_status == SCSI_STATUS_OK)
1350 		status = CAM_REQ_CMP;
1351 	else
1352 		status = CAM_SCSI_STATUS_ERROR;
1353 
1354 	resp_sense_length = vtscsi_htog32(sc, cmd_resp->sense_len);
1355 
1356 	if (resp_sense_length > 0) {
1357 		status |= CAM_AUTOSNS_VALID;
1358 
1359 		if (resp_sense_length < csio->sense_len)
1360 			csio->sense_resid = csio->sense_len - resp_sense_length;
1361 		else
1362 			csio->sense_resid = 0;
1363 
1364 		memcpy(&csio->sense_data, cmd_resp->sense,
1365 		    csio->sense_len - csio->sense_resid);
1366 	}
1367 
1368 	vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
1369 	    "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
1370 	    csio, csio->scsi_status, csio->resid, csio->sense_resid);
1371 
1372 	return (status);
1373 }
1374 
/*
 * Completion handler for a SCSI command dequeued from the request
 * virtqueue: stop its timeout callout, map the virtio response to a
 * CAM status, complete the CCB, and return the request to the pool.
 */
static void
vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_resp *cmd_resp;
	cam_status status;

	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_resp = &req->vsr_cmd_resp;

	KASSERT(ccbh->ccbh_vtscsi_req == req,
	    ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&req->vsr_callout);

	status = vtscsi_scsi_cmd_cam_status(cmd_resp);
	if (status == CAM_REQ_ABORTED) {
		/* Report an abort we initiated for a timeout as a timeout. */
		if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
			status = CAM_CMD_TIMEOUT;
	} else if (status == CAM_REQ_CMP)
		status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);

	/* On any error, freeze the device queue as CAM expects. */
	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccbh->path, 1);
	}

	/* A slot and a request just freed up; the SIMQ may thaw. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		status |= CAM_RELEASE_SIMQ;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
	    req, ccbh, status);

	ccbh->status = status;
	xpt_done(req->vsr_ccb);
	vtscsi_enqueue_request(sc, req);
}
1415 
1416 static void
1417 vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
1418 {
1419 
1420 	/* XXX We probably shouldn't poll forever. */
1421 	req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
1422 	do
1423 		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1424 	while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);
1425 
1426 	req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
1427 }
1428 
1429 static int
1430 vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
1431     struct sglist *sg, int readable, int writable, int flag)
1432 {
1433 	struct virtqueue *vq;
1434 	int error;
1435 
1436 	vq = sc->vtscsi_control_vq;
1437 
1438 	MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);
1439 
1440 	error = virtqueue_enqueue(vq, req, sg, readable, writable);
1441 	if (error) {
1442 		/*
1443 		 * Return EAGAIN when the virtqueue does not have enough
1444 		 * descriptors available.
1445 		 */
1446 		if (error == ENOSPC || error == EMSGSIZE)
1447 			error = EAGAIN;
1448 
1449 		return (error);
1450 	}
1451 
1452 	virtqueue_notify(vq);
1453 	if (flag == VTSCSI_EXECUTE_POLL)
1454 		vtscsi_poll_ctrl_req(sc, req);
1455 
1456 	return (0);
1457 }
1458 
1459 static void
1460 vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
1461     struct vtscsi_request *req)
1462 {
1463 	union ccb *ccb;
1464 	struct ccb_hdr *ccbh;
1465 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1466 
1467 	ccb = req->vsr_ccb;
1468 	ccbh = &ccb->ccb_h;
1469 	tmf_resp = &req->vsr_tmf_resp;
1470 
1471 	switch (tmf_resp->response) {
1472 	case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
1473 		ccbh->status = CAM_REQ_CMP;
1474 		break;
1475 	case VIRTIO_SCSI_S_FUNCTION_REJECTED:
1476 		ccbh->status = CAM_UA_ABORT;
1477 		break;
1478 	default:
1479 		ccbh->status = CAM_REQ_CMP_ERR;
1480 		break;
1481 	}
1482 
1483 	xpt_done(ccb);
1484 	vtscsi_enqueue_request(sc, req);
1485 }
1486 
/*
 * Handle an XPT_ABORT CCB by issuing an asynchronous ABORT_TASK TMF for
 * the CCB named in cab->abort_ccb. The target request is located via
 * the ccbh_vtscsi_req link set in vtscsi_execute_scsi_cmd().
 */
static int
vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_abort *cab;
	struct ccb_hdr *ccbh;
	struct ccb_hdr *abort_ccbh;
	struct vtscsi_request *abort_req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	cab = &req->vsr_ccb->cab;
	ccbh = &cab->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* CCB header and request that's to be aborted. */
	abort_ccbh = &cab->abort_ccb->ccb_h;
	abort_req = abort_ccbh->ccbh_vtscsi_req;

	if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
		error = EINVAL;
		goto fail;
	}

	/* Only attempt to abort requests that could be in-flight. */
	if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
		error = EALREADY;
		goto fail;
	}

	/* Stop the timeout callout; the abort supersedes it. */
	abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
	if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&abort_req->vsr_callout);

	/* The tag matches the one assigned in vtscsi_init_scsi_cmd_req(). */
	vtscsi_init_ctrl_tmf_req(sc, ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) abort_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_abort_task_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

fail:
	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
	    "abort_req=%p\n", error, req, abort_ccbh, abort_req);

	return (error);
}
1544 
1545 static void
1546 vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
1547     struct vtscsi_request *req)
1548 {
1549 	union ccb *ccb;
1550 	struct ccb_hdr *ccbh;
1551 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1552 
1553 	ccb = req->vsr_ccb;
1554 	ccbh = &ccb->ccb_h;
1555 	tmf_resp = &req->vsr_tmf_resp;
1556 
1557 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
1558 	    req, ccb, tmf_resp->response);
1559 
1560 	if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
1561 		ccbh->status = CAM_REQ_CMP;
1562 		vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
1563 		    ccbh->target_lun);
1564 	} else
1565 		ccbh->status = CAM_REQ_CMP_ERR;
1566 
1567 	xpt_done(ccb);
1568 	vtscsi_enqueue_request(sc, req);
1569 }
1570 
1571 static int
1572 vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
1573     struct vtscsi_request *req)
1574 {
1575 	struct sglist *sg;
1576 	struct ccb_resetdev *crd;
1577 	struct ccb_hdr *ccbh;
1578 	struct virtio_scsi_ctrl_tmf_req *tmf_req;
1579 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1580 	uint32_t subtype;
1581 	int error;
1582 
1583 	sg = sc->vtscsi_sglist;
1584 	crd = &req->vsr_ccb->crd;
1585 	ccbh = &crd->ccb_h;
1586 	tmf_req = &req->vsr_tmf_req;
1587 	tmf_resp = &req->vsr_tmf_resp;
1588 
1589 	if (ccbh->target_lun == CAM_LUN_WILDCARD)
1590 		subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
1591 	else
1592 		subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;
1593 
1594 	vtscsi_init_ctrl_tmf_req(sc, ccbh, subtype, 0, tmf_req);
1595 
1596 	sglist_reset(sg);
1597 	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
1598 	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
1599 
1600 	req->vsr_complete = vtscsi_complete_reset_dev_cmd;
1601 	tmf_resp->response = -1;
1602 
1603 	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
1604 	    VTSCSI_EXECUTE_ASYNC);
1605 
1606 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
1607 	    error, req, ccbh);
1608 
1609 	return (error);
1610 }
1611 
1612 static void
1613 vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
1614 {
1615 
1616 	*target_id = lun[1];
1617 	*lun_id = (lun[2] << 8) | lun[3];
1618 }
1619 
/*
 * Encode the CCB's target and LUN into the first four bytes of the
 * 8-byte virtio LUN address: byte 0 is 1, byte 1 is the target, and
 * bytes 2-3 carry the LUN with 0x40 (the SAM flat-addressing method
 * bit) set in the high byte. LUNs above 0x3FFF cannot be represented.
 */
static void
vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
{

	lun[0] = 1;
	lun[1] = ccbh->target_id;
	lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
	lun[3] = ccbh->target_lun & 0xFF;
}
1629 
1630 static void
1631 vtscsi_init_scsi_cmd_req(struct vtscsi_softc *sc, struct ccb_scsiio *csio,
1632     struct virtio_scsi_cmd_req *cmd_req)
1633 {
1634 	uint8_t attr;
1635 
1636 	switch (csio->tag_action) {
1637 	case MSG_HEAD_OF_Q_TAG:
1638 		attr = VIRTIO_SCSI_S_HEAD;
1639 		break;
1640 	case MSG_ORDERED_Q_TAG:
1641 		attr = VIRTIO_SCSI_S_ORDERED;
1642 		break;
1643 	case MSG_ACA_TASK:
1644 		attr = VIRTIO_SCSI_S_ACA;
1645 		break;
1646 	default: /* MSG_SIMPLE_Q_TAG */
1647 		attr = VIRTIO_SCSI_S_SIMPLE;
1648 		break;
1649 	}
1650 
1651 	vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
1652 	cmd_req->tag = vtscsi_gtoh64(sc, (uintptr_t) csio);
1653 	cmd_req->task_attr = attr;
1654 
1655 	memcpy(cmd_req->cdb,
1656 	    csio->ccb_h.flags & CAM_CDB_POINTER ?
1657 	        csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
1658 	    csio->cdb_len);
1659 }
1660 
/*
 * Fill out a task-management request: the LUN address from the CCB,
 * the TMF type/subtype, and the caller-chosen tag, converted to the
 * device's byte order as needed.
 */
static void
vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *sc, struct ccb_hdr *ccbh,
    uint32_t subtype, uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
{

	vtscsi_set_request_lun(ccbh, tmf_req->lun);

	tmf_req->type = vtscsi_gtoh32(sc, VIRTIO_SCSI_T_TMF);
	tmf_req->subtype = vtscsi_gtoh32(sc, subtype);
	tmf_req->tag = vtscsi_gtoh64(sc, tag);
}
1672 
1673 static void
1674 vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
1675 {
1676 	int frozen;
1677 
1678 	frozen = sc->vtscsi_frozen;
1679 
1680 	if (reason & VTSCSI_REQUEST &&
1681 	    (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
1682 		sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;
1683 
1684 	if (reason & VTSCSI_REQUEST_VQ &&
1685 	    (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
1686 		sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;
1687 
1688 	/* Freeze the SIMQ if transitioned to frozen. */
1689 	if (frozen == 0 && sc->vtscsi_frozen != 0) {
1690 		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
1691 		xpt_freeze_simq(sc->vtscsi_sim, 1);
1692 	}
1693 }
1694 
1695 static int
1696 vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
1697 {
1698 	int thawed;
1699 
1700 	if (sc->vtscsi_frozen == 0 || reason == 0)
1701 		return (0);
1702 
1703 	if (reason & VTSCSI_REQUEST &&
1704 	    sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
1705 		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;
1706 
1707 	if (reason & VTSCSI_REQUEST_VQ &&
1708 	    sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
1709 		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;
1710 
1711 	thawed = sc->vtscsi_frozen == 0;
1712 	if (thawed != 0)
1713 		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");
1714 
1715 	return (thawed);
1716 }
1717 
/*
 * Deliver an async event (bus reset, BDR, ...) to CAM for the given
 * target and LUN, creating a temporary path when a specific device is
 * addressed.
 */
static void
vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
    target_id_t target_id, lun_id_t lun_id)
{
	struct cam_path *path;

	/* Use the wildcard path from our softc for bus announcements. */
	if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
		xpt_async(ac_code, sc->vtscsi_path, NULL);
		return;
	}

	if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
	    target_id, lun_id) != CAM_REQ_CMP) {
		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
		return;
	}

	xpt_async(ac_code, path, NULL);
	xpt_free_path(path);
}
1739 
1740 static void
1741 vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
1742     lun_id_t lun_id)
1743 {
1744 	union ccb *ccb;
1745 	cam_status status;
1746 
1747 	ccb = xpt_alloc_ccb_nowait();
1748 	if (ccb == NULL) {
1749 		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
1750 		return;
1751 	}
1752 
1753 	status = xpt_create_path(&ccb->ccb_h.path, NULL,
1754 	    cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
1755 	if (status != CAM_REQ_CMP) {
1756 		xpt_free_ccb(ccb);
1757 		return;
1758 	}
1759 
1760 	xpt_rescan(ccb);
1761 }
1762 
/* Rescan every target and LUN on the bus. */
static void
vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
{

	vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
}
1769 
/*
 * Handle a transport reset event from the host by rescanning the
 * affected target/LUN; both hot-add (RESCAN) and hot-remove (REMOVED)
 * are serviced with a rescan, which discovers or tears down the device.
 */
static void
vtscsi_transport_reset_event(struct vtscsi_softc *sc,
    struct virtio_scsi_event *event)
{
	target_id_t target_id;
	lun_id_t lun_id;

	vtscsi_get_request_lun(event->lun, &target_id, &lun_id);

	switch (event->reason) {
	case VIRTIO_SCSI_EVT_RESET_RESCAN:
	case VIRTIO_SCSI_EVT_RESET_REMOVED:
		vtscsi_execute_rescan(sc, target_id, lun_id);
		break;
	default:
		device_printf(sc->vtscsi_dev,
		    "unhandled transport event reason: %d\n", event->reason);
		break;
	}
}
1790 
/*
 * Dispatch one event buffer dequeued from the event virtqueue. When
 * the host reports that events were missed, fall back to a full bus
 * rescan. The buffer is always returned to the virtqueue afterwards.
 */
static void
vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
{
	int error __diagused;

	if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
		switch (event->event) {
		case VIRTIO_SCSI_T_TRANSPORT_RESET:
			vtscsi_transport_reset_event(sc, event);
			break;
		default:
			device_printf(sc->vtscsi_dev,
			    "unhandled event: %d\n", event->event);
			break;
		}
	} else
		vtscsi_execute_rescan_bus(sc);

	/*
	 * This should always be successful since the buffer
	 * was just dequeued.
	 */
	error = vtscsi_enqueue_event_buf(sc, event);
	KASSERT(error == 0,
	    ("cannot requeue event buffer: %d", error));
}
1817 
1818 static int
1819 vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
1820     struct virtio_scsi_event *event)
1821 {
1822 	struct sglist *sg;
1823 	struct virtqueue *vq;
1824 	int size, error;
1825 
1826 	sg = sc->vtscsi_sglist;
1827 	vq = sc->vtscsi_event_vq;
1828 	size = sc->vtscsi_event_buf_size;
1829 
1830 	bzero(event, size);
1831 
1832 	sglist_reset(sg);
1833 	error = sglist_append(sg, event, size);
1834 	if (error)
1835 		return (error);
1836 
1837 	error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
1838 	if (error)
1839 		return (error);
1840 
1841 	virtqueue_notify(vq);
1842 
1843 	return (0);
1844 }
1845 
/*
 * Post the initial set of event buffers on the event virtqueue. Skipped
 * entirely (returning success) when hotplug is not negotiated or the
 * configured buffer size is too small for an event structure.
 */
static int
vtscsi_init_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, size, error;

	/*
	 * The first release of QEMU with VirtIO SCSI support would crash
	 * when attempting to notify the event virtqueue. This was fixed
	 * when hotplug support was added.
	 */
	if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
		size = sc->vtscsi_event_buf_size;
	else
		size = 0;

	if (size < sizeof(struct virtio_scsi_event))
		return (0);

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/*
	 * Even just one buffer is enough. Missed events are
	 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
	 */
	if (i > 0)
		error = 0;

	return (error);
}
1882 
/*
 * Repost the event buffers after the device has been reinitialized
 * (e.g. following a bus reset). No-op when hotplug is not negotiated
 * or the buffers are too small, mirroring vtscsi_init_event_vq().
 */
static void
vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, error;

	if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
	    sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
		return;

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/* As at init time, a single posted buffer is sufficient. */
	KASSERT(i > 0, ("cannot reinit event vq: %d", error));
}
1903 
1904 static void
1905 vtscsi_drain_event_vq(struct vtscsi_softc *sc)
1906 {
1907 	struct virtqueue *vq;
1908 	int last;
1909 
1910 	vq = sc->vtscsi_event_vq;
1911 	last = 0;
1912 
1913 	while (virtqueue_drain(vq, &last) != NULL)
1914 		;
1915 
1916 	KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
1917 }
1918 
/*
 * Process any pending completions on the request and control
 * virtqueues. Caller must hold the softc mutex.
 */
static void
vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_request_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_control_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
}
1930 
/* Locking wrapper around vtscsi_complete_vqs_locked(). */
static void
vtscsi_complete_vqs(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);
	vtscsi_complete_vqs_locked(sc);
	VTSCSI_UNLOCK(sc);
}
1939 
/*
 * Terminate a request pulled out of a drained virtqueue: stop or drain
 * its timeout callout, complete its CCB with CAM_NO_HBA (detach) or
 * CAM_REQUEUE_REQ (bus reset), and return the request to the free list.
 */
static void
vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	union ccb *ccb;
	int detach;

	ccb = req->vsr_ccb;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);

	/*
	 * The callout must be drained when detaching since the request is
	 * about to be freed. The VTSCSI_MTX must not be held for this in
	 * case the callout is pending because there is a deadlock potential.
	 * Otherwise, the virtqueue is being drained because of a bus reset
	 * so we only need to attempt to stop the callouts.
	 */
	detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
	if (detach != 0)
		VTSCSI_LOCK_NOTOWNED(sc);
	else
		VTSCSI_LOCK_OWNED(sc);

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
		if (detach != 0)
			callout_drain(&req->vsr_callout);
		else
			callout_stop(&req->vsr_callout);
	}

	if (ccb != NULL) {
		/* xpt_done() expects the lock held; take it when detaching. */
		if (detach != 0) {
			VTSCSI_LOCK(sc);
			ccb->ccb_h.status = CAM_NO_HBA;
		} else
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		if (detach != 0)
			VTSCSI_UNLOCK(sc);
	}

	vtscsi_enqueue_request(sc, req);
}
1983 
1984 static void
1985 vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
1986 {
1987 	struct vtscsi_request *req;
1988 	int last;
1989 
1990 	last = 0;
1991 
1992 	vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);
1993 
1994 	while ((req = virtqueue_drain(vq, &last)) != NULL)
1995 		vtscsi_cancel_request(sc, req);
1996 
1997 	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
1998 }
1999 
/*
 * Drain all three virtqueues: outstanding control and SCSI requests are
 * cancelled back to CAM, and event buffers are simply discarded.
 */
static void
vtscsi_drain_vqs(struct vtscsi_softc *sc)
{

	if (sc->vtscsi_control_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
	if (sc->vtscsi_request_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_event_vq != NULL)
		vtscsi_drain_event_vq(sc);
}
2011 
/* Quiesce the device: disable virtqueue interrupts, then stop it. */
static void
vtscsi_stop(struct vtscsi_softc *sc)
{

	vtscsi_disable_vqs_intr(sc);
	virtio_stop(sc->vtscsi_dev);
}
2019 
/*
 * Perform a full bus reset: stop the device, complete and drain all
 * outstanding work back to CAM for retry, reinitialize the device, and
 * announce the reset. Caller must hold the softc mutex. Can be
 * disabled with the vtscsi_bus_reset_disable tunable.
 */
static int
vtscsi_reset_bus(struct vtscsi_softc *sc)
{
	int error;

	VTSCSI_LOCK_OWNED(sc);

	if (vtscsi_bus_reset_disable != 0) {
		device_printf(sc->vtscsi_dev, "bus reset disabled\n");
		return (0);
	}

	/* Guards against recursive resets from the timeout path. */
	sc->vtscsi_flags |= VTSCSI_FLAG_RESET;

	/*
	 * vtscsi_stop() will cause the in-flight requests to be canceled.
	 * Those requests are then completed here so CAM will retry them
	 * after the reset is complete.
	 */
	vtscsi_stop(sc);
	vtscsi_complete_vqs_locked(sc);

	/* Rid the virtqueues of any remaining requests. */
	vtscsi_drain_vqs(sc);

	/*
	 * Any resource shortage that froze the SIMQ cannot persist across
	 * a bus reset so ensure it gets thawed here.
	 */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		xpt_release_simq(sc->vtscsi_sim, 0);

	error = vtscsi_reinit(sc);
	if (error) {
		device_printf(sc->vtscsi_dev,
		    "reinitialization failed, stopping device...\n");
		vtscsi_stop(sc);
	} else
		vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
		    CAM_LUN_WILDCARD);

	sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;

	return (error);
}
2065 
/*
 * One-time initialization of a freshly allocated request. The
 * INVARIANTS check verifies the command and response headers each fit
 * in a single physical segment, as the sglist construction assumes.
 * The callout is tied to the softc mutex so timeout handlers run with
 * it held.
 */
static void
vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

#ifdef INVARIANTS
	int req_nsegs, resp_nsegs;

	req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
	resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));

	KASSERT(req_nsegs == 1, ("request crossed page boundary"));
	KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
#endif

	req->vsr_softc = sc;
	callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
}
2083 
2084 static int
2085 vtscsi_alloc_requests(struct vtscsi_softc *sc)
2086 {
2087 	struct vtscsi_request *req;
2088 	int i, nreqs;
2089 
2090 	/*
2091 	 * Commands destined for either the request or control queues come
2092 	 * from the same SIM queue. Use the size of the request virtqueue
2093 	 * as it (should) be much more frequently used. Some additional
2094 	 * requests are allocated for internal (TMF) use.
2095 	 */
2096 	nreqs = virtqueue_size(sc->vtscsi_request_vq);
2097 	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
2098 		nreqs /= VTSCSI_MIN_SEGMENTS;
2099 	nreqs += VTSCSI_RESERVED_REQUESTS;
2100 
2101 	for (i = 0; i < nreqs; i++) {
2102 		req = malloc(sizeof(struct vtscsi_request), M_DEVBUF,
2103 		    M_NOWAIT);
2104 		if (req == NULL)
2105 			return (ENOMEM);
2106 
2107 		vtscsi_init_request(sc, req);
2108 
2109 		sc->vtscsi_nrequests++;
2110 		vtscsi_enqueue_request(sc, req);
2111 	}
2112 
2113 	return (0);
2114 }
2115 
2116 static void
2117 vtscsi_free_requests(struct vtscsi_softc *sc)
2118 {
2119 	struct vtscsi_request *req;
2120 
2121 	while ((req = vtscsi_dequeue_request(sc)) != NULL) {
2122 		KASSERT(callout_active(&req->vsr_callout) == 0,
2123 		    ("request callout still active"));
2124 
2125 		sc->vtscsi_nrequests--;
2126 		free(req, M_DEVBUF);
2127 	}
2128 
2129 	KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
2130 	    sc->vtscsi_nrequests));
2131 }
2132 
/*
 * Return a request to the free list, scrubbing its per-command state
 * first. Thaws the SIMQ if it was frozen for lack of requests.
 */
static void
vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	KASSERT(req->vsr_softc == sc,
	    ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

	/* A request is available so the SIMQ could be released. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
		xpt_release_simq(sc->vtscsi_sim, 1);

	req->vsr_ccb = NULL;
	req->vsr_complete = NULL;
	req->vsr_ptr0 = NULL;
	req->vsr_state = VTSCSI_REQ_STATE_FREE;
	req->vsr_flags = 0;

	/* Clear the virtio command/response headers for the next user. */
	bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
	bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));

	/*
	 * We insert at the tail of the queue in order to make it
	 * very unlikely a request will be reused if we race with
	 * stopping its callout handler.
	 */
	TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
}
2162 
2163 static struct vtscsi_request *
2164 vtscsi_dequeue_request(struct vtscsi_softc *sc)
2165 {
2166 	struct vtscsi_request *req;
2167 
2168 	req = TAILQ_FIRST(&sc->vtscsi_req_free);
2169 	if (req != NULL) {
2170 		req->vsr_state = VTSCSI_REQ_STATE_INUSE;
2171 		TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
2172 	} else
2173 		sc->vtscsi_stats.dequeue_no_requests++;
2174 
2175 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
2176 
2177 	return (req);
2178 }
2179 
/*
 * Finish one dequeued request: flag completion for any poller spinning
 * in vtscsi_poll_ctrl_req(), then invoke its completion handler.
 */
static void
vtscsi_complete_request(struct vtscsi_request *req)
{

	if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
		req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;

	if (req->vsr_complete != NULL)
		req->vsr_complete(req->vsr_softc, req);
}
2190 
2191 static void
2192 vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
2193 {
2194 	struct vtscsi_request *req;
2195 
2196 	VTSCSI_LOCK_OWNED(sc);
2197 
2198 	while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
2199 		vtscsi_complete_request(req);
2200 }
2201 
2202 static void
2203 vtscsi_control_vq_intr(void *xsc)
2204 {
2205 	struct vtscsi_softc *sc;
2206 	struct virtqueue *vq;
2207 
2208 	sc = xsc;
2209 	vq = sc->vtscsi_control_vq;
2210 
2211 again:
2212 	VTSCSI_LOCK(sc);
2213 
2214 	vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
2215 
2216 	if (virtqueue_enable_intr(vq) != 0) {
2217 		virtqueue_disable_intr(vq);
2218 		VTSCSI_UNLOCK(sc);
2219 		goto again;
2220 	}
2221 
2222 	VTSCSI_UNLOCK(sc);
2223 }
2224 
2225 static void
2226 vtscsi_event_vq_intr(void *xsc)
2227 {
2228 	struct vtscsi_softc *sc;
2229 	struct virtqueue *vq;
2230 	struct virtio_scsi_event *event;
2231 
2232 	sc = xsc;
2233 	vq = sc->vtscsi_event_vq;
2234 
2235 again:
2236 	VTSCSI_LOCK(sc);
2237 
2238 	while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
2239 		vtscsi_handle_event(sc, event);
2240 
2241 	if (virtqueue_enable_intr(vq) != 0) {
2242 		virtqueue_disable_intr(vq);
2243 		VTSCSI_UNLOCK(sc);
2244 		goto again;
2245 	}
2246 
2247 	VTSCSI_UNLOCK(sc);
2248 }
2249 
2250 static void
2251 vtscsi_request_vq_intr(void *xsc)
2252 {
2253 	struct vtscsi_softc *sc;
2254 	struct virtqueue *vq;
2255 
2256 	sc = xsc;
2257 	vq = sc->vtscsi_request_vq;
2258 
2259 again:
2260 	VTSCSI_LOCK(sc);
2261 
2262 	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
2263 
2264 	if (virtqueue_enable_intr(vq) != 0) {
2265 		virtqueue_disable_intr(vq);
2266 		VTSCSI_UNLOCK(sc);
2267 		goto again;
2268 	}
2269 
2270 	VTSCSI_UNLOCK(sc);
2271 }
2272 
/* Mask interrupts on all three of the device's virtqueues. */
static void
vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_disable_intr(sc->vtscsi_control_vq);
	virtqueue_disable_intr(sc->vtscsi_event_vq);
	virtqueue_disable_intr(sc->vtscsi_request_vq);
}
2281 
/*
 * Unmask interrupts on all three of the device's virtqueues.
 *
 * NOTE(review): the virtqueue_enable_intr() return values (which the
 * per-queue interrupt handlers use to detect pending entries) are
 * ignored here — presumably callers drain the queues separately;
 * confirm against the attach/resume paths.
 */
static void
vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_enable_intr(sc->vtscsi_control_vq);
	virtqueue_enable_intr(sc->vtscsi_event_vq);
	virtqueue_enable_intr(sc->vtscsi_request_vq);
}
2290 
2291 static void
2292 vtscsi_get_tunables(struct vtscsi_softc *sc)
2293 {
2294 	char tmpstr[64];
2295 
2296 	TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);
2297 
2298 	snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
2299 	    device_get_unit(sc->vtscsi_dev));
2300 	TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
2301 }
2302 
2303 static void
2304 vtscsi_setup_sysctl(struct vtscsi_softc *sc)
2305 {
2306 	device_t dev;
2307 	struct vtscsi_statistics *stats;
2308         struct sysctl_ctx_list *ctx;
2309 	struct sysctl_oid *tree;
2310 	struct sysctl_oid_list *child;
2311 
2312 	dev = sc->vtscsi_dev;
2313 	stats = &sc->vtscsi_stats;
2314 	ctx = device_get_sysctl_ctx(dev);
2315 	tree = device_get_sysctl_tree(dev);
2316 	child = SYSCTL_CHILDREN(tree);
2317 
2318 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
2319 	    CTLFLAG_RW, &sc->vtscsi_debug, 0,
2320 	    "Debug level");
2321 
2322 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
2323 	    CTLFLAG_RD, &stats->scsi_cmd_timeouts,
2324 	    "SCSI command timeouts");
2325 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
2326 	    CTLFLAG_RD, &stats->dequeue_no_requests,
2327 	    "No available requests to dequeue");
2328 }
2329 
/*
 * Log a formatted message for a request, prefixed with the device
 * name, the calling function, and — when the request carries a CCB —
 * the CAM path plus, for SCSI I/O CCBs, the decoded command and
 * transfer length.  A NULL request logs nothing.
 */
static void
vtscsi_printf_req(struct vtscsi_request *req, const char *func,
    const char *fmt, ...)
{
	struct vtscsi_softc *sc;
	union ccb *ccb;
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (req == NULL)
		return;

	sc = req->vsr_softc;
	ccb = req->vsr_ccb;

	va_start(ap, fmt);
	/* Fixed-length sbuf over str[] (flags 0 with a caller buffer). */
	sbuf_new(&sb, str, sizeof(str), 0);

	if (ccb == NULL) {
		/* No CCB attached: fall back to a SIM-level prefix. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
		    cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
		    cam_sim_bus(sc->vtscsi_sim));
	} else {
		xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
		}
	}

	sbuf_vprintf(&sb, fmt, ap);
	va_end(ap);

	/*
	 * NOTE(review): output exceeding sizeof(str) is silently
	 * truncated by the fixed-length sbuf.
	 */
	sbuf_finish(&sb);
	printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
	    sbuf_data(&sb));
}
2370