xref: /freebsd/sys/dev/virtio/scsi/virtio_scsi.c (revision 0a36787e4c1fa0cf77dcf83be0867178476e372b)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /* Driver for VirtIO SCSI devices. */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/kthread.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/sglist.h>
41 #include <sys/sysctl.h>
42 #include <sys/lock.h>
43 #include <sys/mutex.h>
44 #include <sys/callout.h>
45 #include <sys/queue.h>
46 #include <sys/sbuf.h>
47 
48 #include <machine/stdarg.h>
49 
50 #include <machine/bus.h>
51 #include <machine/resource.h>
52 #include <sys/bus.h>
53 #include <sys/rman.h>
54 
55 #include <cam/cam.h>
56 #include <cam/cam_ccb.h>
57 #include <cam/cam_sim.h>
58 #include <cam/cam_periph.h>
59 #include <cam/cam_xpt_sim.h>
60 #include <cam/cam_debug.h>
61 #include <cam/scsi/scsi_all.h>
62 #include <cam/scsi/scsi_message.h>
63 
64 #include <dev/virtio/virtio.h>
65 #include <dev/virtio/virtqueue.h>
66 #include <dev/virtio/scsi/virtio_scsi.h>
67 #include <dev/virtio/scsi/virtio_scsivar.h>
68 
69 #include "virtio_if.h"
70 
71 static int	vtscsi_modevent(module_t, int, void *);
72 
73 static int	vtscsi_probe(device_t);
74 static int	vtscsi_attach(device_t);
75 static int	vtscsi_detach(device_t);
76 static int	vtscsi_suspend(device_t);
77 static int	vtscsi_resume(device_t);
78 
79 static int	vtscsi_negotiate_features(struct vtscsi_softc *);
80 static int	vtscsi_setup_features(struct vtscsi_softc *);
81 static void	vtscsi_read_config(struct vtscsi_softc *,
82 		    struct virtio_scsi_config *);
83 static int	vtscsi_maximum_segments(struct vtscsi_softc *, int);
84 static int	vtscsi_alloc_virtqueues(struct vtscsi_softc *);
85 static void	vtscsi_check_sizes(struct vtscsi_softc *);
86 static void	vtscsi_write_device_config(struct vtscsi_softc *);
87 static int	vtscsi_reinit(struct vtscsi_softc *);
88 
89 static int	vtscsi_alloc_cam(struct vtscsi_softc *);
90 static int	vtscsi_register_cam(struct vtscsi_softc *);
91 static void	vtscsi_free_cam(struct vtscsi_softc *);
92 static void	vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
93 static int	vtscsi_register_async(struct vtscsi_softc *);
94 static void	vtscsi_deregister_async(struct vtscsi_softc *);
95 static void	vtscsi_cam_action(struct cam_sim *, union ccb *);
96 static void	vtscsi_cam_poll(struct cam_sim *);
97 
98 static void	vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
99 		    union ccb *);
100 static void	vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
101 		    union ccb *);
102 static void	vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
103 static void	vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
104 static void	vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
105 static void	vtscsi_cam_path_inquiry(struct vtscsi_softc *,
106 		    struct cam_sim *, union ccb *);
107 
108 static int	vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
109 		    struct sglist *, struct ccb_scsiio *);
110 static int	vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
111 		    struct vtscsi_request *, int *, int *);
112 static int	vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
113 		    struct vtscsi_request *);
114 static int	vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
115 static void	vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
116 		    struct vtscsi_request *);
117 static int	vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
118 		    struct vtscsi_request *);
119 static void	vtscsi_timedout_scsi_cmd(void *);
120 static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
121 static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
122 		    struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
123 static void	vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
124 		    struct vtscsi_request *);
125 
126 static void	vtscsi_poll_ctrl_req(struct vtscsi_softc *,
127 		    struct vtscsi_request *);
128 static int	vtscsi_execute_ctrl_req(struct vtscsi_softc *,
129 		    struct vtscsi_request *, struct sglist *, int, int, int);
130 static void	vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c,
131 		    struct vtscsi_request *);
132 static int	vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
133 		    struct vtscsi_request *);
134 static int	vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
135 		    struct vtscsi_request *);
136 
137 static void	vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
138 static void	vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
139 static void	vtscsi_init_scsi_cmd_req(struct vtscsi_softc *,
140 		    struct ccb_scsiio *, struct virtio_scsi_cmd_req *);
141 static void	vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *, struct ccb_hdr *,
142 		    uint32_t, uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
143 
144 static void	vtscsi_freeze_simq(struct vtscsi_softc *, int);
145 static int	vtscsi_thaw_simq(struct vtscsi_softc *, int);
146 
147 static void	vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
148 		    lun_id_t);
149 static void	vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
150 		    lun_id_t);
151 static void	vtscsi_execute_rescan_bus(struct vtscsi_softc *);
152 
153 static void	vtscsi_handle_event(struct vtscsi_softc *,
154 		    struct virtio_scsi_event *);
155 static int	vtscsi_enqueue_event_buf(struct vtscsi_softc *,
156 		    struct virtio_scsi_event *);
157 static int	vtscsi_init_event_vq(struct vtscsi_softc *);
158 static void	vtscsi_reinit_event_vq(struct vtscsi_softc *);
159 static void	vtscsi_drain_event_vq(struct vtscsi_softc *);
160 
161 static void	vtscsi_complete_vqs_locked(struct vtscsi_softc *);
162 static void	vtscsi_complete_vqs(struct vtscsi_softc *);
163 static void	vtscsi_drain_vqs(struct vtscsi_softc *);
164 static void	vtscsi_cancel_request(struct vtscsi_softc *,
165 		    struct vtscsi_request *);
166 static void	vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
167 static void	vtscsi_stop(struct vtscsi_softc *);
168 static int	vtscsi_reset_bus(struct vtscsi_softc *);
169 
170 static void	vtscsi_init_request(struct vtscsi_softc *,
171 		    struct vtscsi_request *);
172 static int	vtscsi_alloc_requests(struct vtscsi_softc *);
173 static void	vtscsi_free_requests(struct vtscsi_softc *);
174 static void	vtscsi_enqueue_request(struct vtscsi_softc *,
175 		    struct vtscsi_request *);
176 static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);
177 
178 static void	vtscsi_complete_request(struct vtscsi_request *);
179 static void	vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);
180 
181 static void	vtscsi_control_vq_intr(void *);
182 static void	vtscsi_event_vq_intr(void *);
183 static void	vtscsi_request_vq_intr(void *);
184 static void	vtscsi_disable_vqs_intr(struct vtscsi_softc *);
185 static void	vtscsi_enable_vqs_intr(struct vtscsi_softc *);
186 
187 static void	vtscsi_get_tunables(struct vtscsi_softc *);
188 static void	vtscsi_setup_sysctl(struct vtscsi_softc *);
189 
190 static void	vtscsi_printf_req(struct vtscsi_request *, const char *,
191 		    const char *, ...);
192 
193 #define vtscsi_modern(_sc) (((_sc)->vtscsi_features & VIRTIO_F_VERSION_1) != 0)
194 #define vtscsi_htog16(_sc, _val)	virtio_htog16(vtscsi_modern(_sc), _val)
195 #define vtscsi_htog32(_sc, _val)	virtio_htog32(vtscsi_modern(_sc), _val)
196 #define vtscsi_htog64(_sc, _val)	virtio_htog64(vtscsi_modern(_sc), _val)
197 #define vtscsi_gtoh16(_sc, _val)	virtio_gtoh16(vtscsi_modern(_sc), _val)
198 #define vtscsi_gtoh32(_sc, _val)	virtio_gtoh32(vtscsi_modern(_sc), _val)
199 #define vtscsi_gtoh64(_sc, _val)	virtio_gtoh64(vtscsi_modern(_sc), _val)
200 
201 /* Global tunables. */
202 /*
203  * The current QEMU VirtIO SCSI implementation does not cancel in-flight
204  * IO during virtio_stop(). So in-flight requests still complete after the
205  * device reset. We would have to wait for all the in-flight IO to complete,
206  * which defeats the typical purpose of a bus reset. We could simulate the
207  * bus reset with either I_T_NEXUS_RESET of all the targets, or with
208  * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
209  * control virtqueue). But this isn't very useful if things really go off
210  * the rails, so default to disabled for now.
211  */
212 static int vtscsi_bus_reset_disable = 1;
213 TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);
214 
/* Human-readable names for the SCSI feature bits, used when logging
 * negotiated virtio features. */
static struct virtio_feature_desc vtscsi_feature_desc[] = {
	{ VIRTIO_SCSI_F_INOUT,		"InOut"		},
	{ VIRTIO_SCSI_F_HOTPLUG,	"Hotplug"	},
	{ VIRTIO_SCSI_F_CHANGE,		"ChangeEvent"	},
	{ VIRTIO_SCSI_F_T10_PI, 	"T10PI"		},

	{ 0, NULL }	/* terminator */
};
223 
/* Newbus method table: only the standard device lifecycle entry points. */
static device_method_t vtscsi_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtscsi_probe),
	DEVMETHOD(device_attach,	vtscsi_attach),
	DEVMETHOD(device_detach,	vtscsi_detach),
	DEVMETHOD(device_suspend,	vtscsi_suspend),
	DEVMETHOD(device_resume,	vtscsi_resume),

	DEVMETHOD_END
};

static driver_t vtscsi_driver = {
	"vtscsi",
	vtscsi_methods,
	sizeof(struct vtscsi_softc)
};
static devclass_t vtscsi_devclass;

/* Register with the virtio bus and declare module dependencies. */
VIRTIO_DRIVER_MODULE(virtio_scsi, vtscsi_driver, vtscsi_devclass,
    vtscsi_modevent, 0);
MODULE_VERSION(virtio_scsi, 1);
MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);

/* Plug-and-play match information for the VirtIO SCSI device ID. */
VIRTIO_SIMPLE_PNPINFO(virtio_scsi, VIRTIO_ID_SCSI, "VirtIO SCSI Adapter");
249 
250 static int
251 vtscsi_modevent(module_t mod, int type, void *unused)
252 {
253 	int error;
254 
255 	switch (type) {
256 	case MOD_LOAD:
257 	case MOD_QUIESCE:
258 	case MOD_UNLOAD:
259 	case MOD_SHUTDOWN:
260 		error = 0;
261 		break;
262 	default:
263 		error = EOPNOTSUPP;
264 		break;
265 	}
266 
267 	return (error);
268 }
269 
/*
 * Probe: match against the virtio_scsi PNP table declared above via
 * the VIRTIO_SIMPLE_PROBE() helper.
 */
static int
vtscsi_probe(device_t dev)
{
	return (VIRTIO_SIMPLE_PROBE(dev, virtio_scsi));
}
275 
/*
 * Attach: negotiate features, read the device configuration, allocate
 * the sglist, virtqueues, request pool and CAM structures, then enable
 * interrupts and register with CAM.  Any failure falls through to the
 * common fail path, which unwinds by calling vtscsi_detach().
 */
static int
vtscsi_attach(device_t dev)
{
	struct vtscsi_softc *sc;
	struct virtio_scsi_config scsicfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtscsi_dev = dev;
	virtio_set_feature_desc(dev, vtscsi_feature_desc);

	VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
	TAILQ_INIT(&sc->vtscsi_req_free);

	vtscsi_get_tunables(sc);
	vtscsi_setup_sysctl(sc);

	error = vtscsi_setup_features(sc);
	if (error) {
		device_printf(dev, "cannot setup features\n");
		goto fail;
	}

	vtscsi_read_config(sc, &scsicfg);

	/* Cache the addressing limits advertised by the host. */
	sc->vtscsi_max_channel = scsicfg.max_channel;
	sc->vtscsi_max_target = scsicfg.max_target;
	sc->vtscsi_max_lun = scsicfg.max_lun;
	sc->vtscsi_event_buf_size = scsicfg.event_info_size;

	vtscsi_write_device_config(sc);

	/* Shared scatter/gather list, sized for the largest request. */
	sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
	sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
	if (sc->vtscsi_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtscsi_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	vtscsi_check_sizes(sc);

	error = vtscsi_init_event_vq(sc);
	if (error) {
		device_printf(dev, "cannot populate the eventvq\n");
		goto fail;
	}

	error = vtscsi_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot allocate requests\n");
		goto fail;
	}

	error = vtscsi_alloc_cam(sc);
	if (error) {
		device_printf(dev, "cannot allocate CAM structures\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_CAM);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		goto fail;
	}

	vtscsi_enable_vqs_intr(sc);

	/*
	 * Register with CAM after interrupts are enabled so we will get
	 * notified of the probe responses.
	 */
	error = vtscsi_register_cam(sc);
	if (error) {
		device_printf(dev, "cannot register with CAM\n");
		goto fail;
	}

fail:
	if (error)
		vtscsi_detach(dev);

	return (error);
}
366 
/*
 * Detach: stop the device, complete and drain any outstanding work,
 * then release the CAM, request, and sglist resources.  Also used by
 * vtscsi_attach() to unwind a partial attach, so every free below
 * tolerates a NULL/unallocated resource.
 */
static int
vtscsi_detach(device_t dev)
{
	struct vtscsi_softc *sc;

	sc = device_get_softc(dev);

	VTSCSI_LOCK(sc);
	/*
	 * Mark the detach before dropping the lock; vtscsi_cam_action()
	 * checks this flag and rejects any CCBs that arrive during the
	 * window before CAM deregistration.
	 */
	sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
	if (device_is_attached(dev))
		vtscsi_stop(sc);
	VTSCSI_UNLOCK(sc);

	vtscsi_complete_vqs(sc);
	vtscsi_drain_vqs(sc);

	vtscsi_free_cam(sc);
	vtscsi_free_requests(sc);

	if (sc->vtscsi_sglist != NULL) {
		sglist_free(sc->vtscsi_sglist);
		sc->vtscsi_sglist = NULL;
	}

	VTSCSI_LOCK_DESTROY(sc);

	return (0);
}
395 
/*
 * Suspend: no driver-side handling is done here.
 * NOTE(review): appears to rely on the host/resume path to restore
 * device state — confirm against vtscsi_reinit() usage.
 */
static int
vtscsi_suspend(device_t dev)
{

	return (0);
}
402 
/*
 * Resume: no driver-side handling is done here.
 * NOTE(review): no reinitialization is performed on resume — confirm
 * this is intentional for this device.
 */
static int
vtscsi_resume(device_t dev)
{

	return (0);
}
409 
410 static int
411 vtscsi_negotiate_features(struct vtscsi_softc *sc)
412 {
413 	device_t dev;
414 	uint64_t features;
415 
416 	dev = sc->vtscsi_dev;
417 	features = VTSCSI_FEATURES;
418 
419 	sc->vtscsi_features = virtio_negotiate_features(dev, features);
420 	return (virtio_finalize_features(dev));
421 }
422 
423 static int
424 vtscsi_setup_features(struct vtscsi_softc *sc)
425 {
426 	device_t dev;
427 	int error;
428 
429 	dev = sc->vtscsi_dev;
430 
431 	error = vtscsi_negotiate_features(sc);
432 	if (error)
433 		return (error);
434 
435 	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
436 		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
437 	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
438 		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
439 	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
440 		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;
441 
442 	return (0);
443 }
444 
/*
 * Read one field of the virtio SCSI configuration space into the
 * same-named field of a struct virtio_scsi_config.
 */
#define VTSCSI_GET_CONFIG(_dev, _field, _cfg)			\
	virtio_read_device_config(_dev,				\
	    offsetof(struct virtio_scsi_config, _field),	\
	    &(_cfg)->_field, sizeof((_cfg)->_field))		\

/* Copy the device's configuration space, field by field. */
static void
vtscsi_read_config(struct vtscsi_softc *sc,
    struct virtio_scsi_config *scsicfg)
{
	device_t dev;

	dev = sc->vtscsi_dev;

	bzero(scsicfg, sizeof(struct virtio_scsi_config));

	VTSCSI_GET_CONFIG(dev, num_queues, scsicfg);
	VTSCSI_GET_CONFIG(dev, seg_max, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_sectors, scsicfg);
	VTSCSI_GET_CONFIG(dev, cmd_per_lun, scsicfg);
	VTSCSI_GET_CONFIG(dev, event_info_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, sense_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, cdb_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_channel, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_target, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_lun, scsicfg);
}

#undef VTSCSI_GET_CONFIG
473 
474 static int
475 vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
476 {
477 	int nsegs;
478 
479 	nsegs = VTSCSI_MIN_SEGMENTS;
480 
481 	if (seg_max > 0) {
482 		nsegs += MIN(seg_max, maxphys / PAGE_SIZE + 1);
483 		if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
484 			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
485 	} else
486 		nsegs += 1;
487 
488 	return (nsegs);
489 }
490 
/*
 * Allocate the three virtqueues used by the device — control, event,
 * and request, in that order.  Only the request queue carries data
 * buffers, so only it is given vtscsi_max_nsegs segments.
 */
static int
vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs;

	dev = sc->vtscsi_dev;
	nvqs = 3;

	VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
	    &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
	    &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
	    vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}
513 
514 static void
515 vtscsi_check_sizes(struct vtscsi_softc *sc)
516 {
517 	int rqsize;
518 
519 	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) {
520 		/*
521 		 * Ensure the assertions in virtqueue_enqueue(),
522 		 * even if the hypervisor reports a bad seg_max.
523 		 */
524 		rqsize = virtqueue_size(sc->vtscsi_request_vq);
525 		if (sc->vtscsi_max_nsegs > rqsize) {
526 			device_printf(sc->vtscsi_dev,
527 			    "clamping seg_max (%d %d)\n", sc->vtscsi_max_nsegs,
528 			    rqsize);
529 			sc->vtscsi_max_nsegs = rqsize;
530 		}
531 	}
532 }
533 
/*
 * Tell the device the sense buffer and CDB sizes this driver uses,
 * by writing them into the device configuration space.
 */
static void
vtscsi_write_device_config(struct vtscsi_softc *sc)
{

	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, sense_size),
	    VIRTIO_SCSI_SENSE_SIZE);

	/*
	 * This is the size in the virtio_scsi_cmd_req structure. Note
	 * this value (32) is larger than the maximum CAM CDB size (16).
	 */
	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, cdb_size),
	    VIRTIO_SCSI_CDB_SIZE);
}
550 
/*
 * Re-initialize the device after it has been reset: replay the
 * previously negotiated features, rewrite the device config fields,
 * repost the event buffers, and re-enable virtqueue interrupts.
 */
static int
vtscsi_reinit(struct vtscsi_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtscsi_dev;

	error = virtio_reinit(dev, sc->vtscsi_features);
	if (error == 0) {
		vtscsi_write_device_config(sc);
		virtio_reinit_complete(dev);
		vtscsi_reinit_event_vq(sc);

		vtscsi_enable_vqs_intr(sc);
	}

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);

	return (error);
}
572 
/*
 * Allocate the CAM SIM and its device queue.  Some of the request
 * pool (VTSCSI_RESERVED_REQUESTS) is held back, so CAM is given
 * correspondingly fewer openings than requests allocated.
 */
static int
vtscsi_alloc_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	struct cam_devq *devq;
	int openings;

	dev = sc->vtscsi_dev;
	openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;

	devq = cam_simq_alloc(openings);
	if (devq == NULL) {
		device_printf(dev, "cannot allocate SIM queue\n");
		return (ENOMEM);
	}

	/* The SIM shares the softc mutex, so CAM calls us with it held. */
	sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
	    "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
	    openings, devq);
	if (sc->vtscsi_sim == NULL) {
		/* cam_sim_alloc() did not take ownership of devq. */
		cam_simq_free(devq);
		device_printf(dev, "cannot allocate SIM\n");
		return (ENOMEM);
	}

	return (0);
}
600 
/*
 * Register the SIM with the XPT layer, create the wildcard bus path,
 * and subscribe to async events.  On failure, unwind whatever was
 * registered so far; 'registered' tracks whether the XPT bus needs
 * deregistering.
 */
static int
vtscsi_register_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	int registered, error;

	dev = sc->vtscsi_dev;
	registered = 0;

	VTSCSI_LOCK(sc);

	if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) {
		error = ENOMEM;
		device_printf(dev, "cannot register XPT bus\n");
		goto fail;
	}

	registered = 1;

	/* Wildcard path covering every target/LUN on this bus. */
	if (xpt_create_path(&sc->vtscsi_path, NULL,
	    cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		error = ENOMEM;
		device_printf(dev, "cannot create bus path\n");
		goto fail;
	}

	if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
		error = EIO;
		device_printf(dev, "cannot register async callback\n");
		goto fail;
	}

	VTSCSI_UNLOCK(sc);

	return (0);

fail:
	/* Undo the registrations in reverse order. */
	if (sc->vtscsi_path != NULL) {
		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;
	}

	if (registered != 0)
		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));

	VTSCSI_UNLOCK(sc);

	return (error);
}
651 
/*
 * Tear down the CAM state created by vtscsi_alloc_cam() and
 * vtscsi_register_cam().  Safe to call with either the path or the
 * SIM never having been set up (e.g. when unwinding a failed attach).
 */
static void
vtscsi_free_cam(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);

	if (sc->vtscsi_path != NULL) {
		vtscsi_deregister_async(sc);

		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;

		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
	}

	if (sc->vtscsi_sim != NULL) {
		cam_sim_free(sc->vtscsi_sim, 1);
		sc->vtscsi_sim = NULL;
	}

	VTSCSI_UNLOCK(sc);
}
674 
675 static void
676 vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
677 {
678 	struct cam_sim *sim;
679 	struct vtscsi_softc *sc;
680 
681 	sim = cb_arg;
682 	sc = cam_sim_softc(sim);
683 
684 	vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);
685 
686 	/*
687 	 * TODO Once QEMU supports event reporting, we should
688 	 *      (un)subscribe to events here.
689 	 */
690 	switch (code) {
691 	case AC_FOUND_DEVICE:
692 		break;
693 	case AC_LOST_DEVICE:
694 		break;
695 	}
696 }
697 
/*
 * Subscribe to device arrival/departure async events on our bus path.
 * Returns the CAM status of the XPT_SASYNC_CB action; the caller
 * checks for CAM_REQ_CMP.
 */
static int
vtscsi_register_async(struct vtscsi_softc *sc)
{
	struct ccb_setasync csa;

	memset(&csa, 0, sizeof(csa));
	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
	csa.callback = vtscsi_cam_async;
	csa.callback_arg = sc->vtscsi_sim;

	xpt_action((union ccb *) &csa);

	return (csa.ccb_h.status);
}
714 
715 static void
716 vtscsi_deregister_async(struct vtscsi_softc *sc)
717 {
718 	struct ccb_setasync csa;
719 
720 	memset(&csa, 0, sizeof(csa));
721 	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
722 	csa.ccb_h.func_code = XPT_SASYNC_CB;
723 	csa.event_enable = 0;
724 	csa.callback = vtscsi_cam_async;
725 	csa.callback_arg = sc->vtscsi_sim;
726 
727 	xpt_action((union ccb *) &csa);
728 }
729 
/*
 * Main CAM entry point: dispatch an incoming CCB by function code.
 * CAM calls this with the SIM lock (the softc mutex) held, which is
 * asserted below.
 */
static void
vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct vtscsi_softc *sc;
	struct ccb_hdr *ccbh;

	sc = cam_sim_softc(sim);
	ccbh = &ccb->ccb_h;

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
		/*
		 * The VTSCSI_MTX is briefly dropped between setting
		 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
		 * drop any CCBs that come in during that window.
		 */
		ccbh->status = CAM_NO_HBA;
		xpt_done(ccb);
		return;
	}

	switch (ccbh->func_code) {
	case XPT_SCSI_IO:
		vtscsi_cam_scsi_io(sc, sim, ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* Transport settings are fixed; nothing can be changed. */
		ccbh->status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
		vtscsi_cam_get_tran_settings(sc, ccb);
		break;

	case XPT_RESET_BUS:
		vtscsi_cam_reset_bus(sc, ccb);
		break;

	case XPT_RESET_DEV:
		vtscsi_cam_reset_dev(sc, ccb);
		break;

	case XPT_ABORT:
		vtscsi_cam_abort(sc, ccb);
		break;

	case XPT_CALC_GEOMETRY:
		/* Use CAM's generic extended-translation geometry. */
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:
		vtscsi_cam_path_inquiry(sc, sim, ccb);
		break;

	default:
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);

		ccbh->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
796 
/*
 * CAM polled-mode entry point: process any pending completions on all
 * virtqueues without relying on interrupts.
 */
static void
vtscsi_cam_poll(struct cam_sim *sim)
{

	vtscsi_complete_vqs_locked(cam_sim_softc(sim));
}
806 
807 static void
808 vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
809     union ccb *ccb)
810 {
811 	struct ccb_hdr *ccbh;
812 	struct ccb_scsiio *csio;
813 	int error;
814 
815 	ccbh = &ccb->ccb_h;
816 	csio = &ccb->csio;
817 
818 	if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
819 		error = EINVAL;
820 		ccbh->status = CAM_REQ_INVALID;
821 		goto done;
822 	}
823 
824 	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
825 	    (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
826 		error = EINVAL;
827 		ccbh->status = CAM_REQ_INVALID;
828 		goto done;
829 	}
830 
831 	error = vtscsi_start_scsi_cmd(sc, ccb);
832 
833 done:
834 	if (error) {
835 		vtscsi_dprintf(sc, VTSCSI_ERROR,
836 		    "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
837 		xpt_done(ccb);
838 	}
839 }
840 
841 static void
842 vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
843 {
844 	struct ccb_trans_settings *cts;
845 	struct ccb_trans_settings_scsi *scsi;
846 
847 	cts = &ccb->cts;
848 	scsi = &cts->proto_specific.scsi;
849 
850 	cts->protocol = PROTO_SCSI;
851 	cts->protocol_version = SCSI_REV_SPC3;
852 	cts->transport = XPORT_SAS;
853 	cts->transport_version = 0;
854 
855 	scsi->valid = CTS_SCSI_VALID_TQ;
856 	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
857 
858 	ccb->ccb_h.status = CAM_REQ_CMP;
859 	xpt_done(ccb);
860 }
861 
862 static void
863 vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
864 {
865 	int error;
866 
867 	error = vtscsi_reset_bus(sc);
868 	if (error == 0)
869 		ccb->ccb_h.status = CAM_REQ_CMP;
870 	else
871 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
872 
873 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
874 	    error, ccb, ccb->ccb_h.status);
875 
876 	xpt_done(ccb);
877 }
878 
/*
 * XPT_RESET_DEV: issue a device reset through the control virtqueue
 * via vtscsi_execute_reset_dev_cmd().  On success the CCB completes
 * asynchronously; on failure it is completed here.  If no request
 * slot is available the SIM queue is frozen until one frees up.
 */
static void
vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct vtscsi_request *req;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_reset_dev_cmd(sc, req);
	if (error == 0)
		return;

	/* The command was never submitted; return the request slot. */
	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
914 
/*
 * XPT_ABORT: issue an abort-task command through the control
 * virtqueue via vtscsi_execute_abort_task_cmd().  Mirrors the
 * structure of vtscsi_cam_reset_dev(): asynchronous completion on
 * success, immediate CCB completion on failure, and a frozen SIM
 * queue when no request slot is available.
 */
static void
vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct vtscsi_request *req;
	struct ccb_hdr *ccbh;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_abort_task_cmd(sc, req);
	if (error == 0)
		return;

	/* The command was never submitted; return the request slot. */
	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
950 
/*
 * XPT_PATH_INQ: describe the HBA's capabilities and limits to CAM,
 * using the values read from the device configuration at attach time.
 */
static void
vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	device_t dev;
	struct ccb_pathinq *cpi;

	dev = sc->vtscsi_dev;
	cpi = &ccb->cpi;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);

	cpi->version_num = 1;
	cpi->hba_inquiry = PI_TAG_ABLE;
	cpi->target_sprt = 0;
	cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
	/* Advertise no bus-reset support when the tunable disables it. */
	if (vtscsi_bus_reset_disable != 0)
		cpi->hba_misc |= PIM_NOBUSRESET;
	cpi->hba_eng_cnt = 0;

	cpi->max_target = sc->vtscsi_max_target;
	cpi->max_lun = sc->vtscsi_max_lun;
	/* Place the initiator ID just past the last valid target. */
	cpi->initiator_id = cpi->max_target + 1;

	strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
	strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);

	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);

	cpi->base_transfer_speed = 300000;

	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC3;
	cpi->transport = XPORT_SAS;
	cpi->transport_version = 0;

	/*
	 * Maximum I/O size: one page per data segment, i.e. segments
	 * remaining after the request/response headers less one.
	 * NOTE(review): presumably the extra -1 leaves slack for a
	 * non-page-aligned buffer — confirm.
	 */
	cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;

	cpi->hba_vendor = virtio_get_vendor(dev);
	cpi->hba_device = virtio_get_device(dev);
	cpi->hba_subvendor = virtio_get_subvendor(dev);
	cpi->hba_subdevice = virtio_get_subdevice(dev);

	ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
}
1000 
/*
 * Append the CCB's data buffer to the scatter/gather list, handling
 * each CAM data-addressing mode: plain virtual or physical buffers,
 * virtual or physical S/G lists, and struct bio.  Returns 0 or an
 * errno from the sglist layer (or EINVAL for an unknown mode).
 */
static int
vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
    struct ccb_scsiio *csio)
{
	struct ccb_hdr *ccbh;
	struct bus_dma_segment *dseg;
	int i, error;

	ccbh = &csio->ccb_h;
	error = 0;

	switch ((ccbh->flags & CAM_DATA_MASK)) {
	case CAM_DATA_VADDR:
		error = sglist_append(sg, csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_PADDR:
		error = sglist_append_phys(sg,
		    (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_SG:
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append(sg,
			    (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	case CAM_DATA_SG_PADDR:
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append_phys(sg,
			    (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	case CAM_DATA_BIO:
		error = sglist_append_bio(sg, (struct bio *) csio->data_ptr);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
1044 
/*
 * Build the request virtqueue scatter/gather list for a SCSI command.
 * The device-readable part is the command header plus any
 * host-to-device (CAM_DIR_OUT) data; the device-writable part is the
 * response structure plus any device-to-host (CAM_DIR_IN) data.  The
 * readable/writable segment counts are returned through the out
 * parameters.  On overflow the CCB is marked CAM_REQ_TOO_BIG and
 * EFBIG is returned.
 *
 * NOTE(review): CAM_DIR_BOTH CCBs match neither direction test here
 * and so get no data segments appended — confirm how bidirectional
 * transfers are intended to be carried.
 */
static int
vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
    int *readable, int *writable)
{
	struct sglist *sg;
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int error;

	sg = sc->vtscsi_sglist;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	sglist_reset(sg);

	sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		/* At least one segment must be left for the response. */
		if (error || sg->sg_nseg == sg->sg_maxseg)
			goto fail;
	}

	*readable = sg->sg_nseg;

	sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		if (error)
			goto fail;
	}

	*writable = sg->sg_nseg - *readable;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
	    "writable=%d\n", req, ccbh, *readable, *writable);

	return (0);

fail:
	/*
	 * This should never happen unless maxio was incorrectly set.
	 */
	vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);

	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
	    "nseg=%d maxseg=%d\n",
	    error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);

	return (EFBIG);
}
1100 
/*
 * Initialize and enqueue a SCSI command on the request virtqueue.
 *
 * Returns zero once the command has been handed to the host; a timeout
 * callout is armed unless the CCB asked for CAM_TIME_INFINITY. If the
 * virtqueue enqueue fails, the CCB is marked CAM_REQUEUE_REQ, the SIMQ
 * is frozen, and the error is returned so the caller can recycle the
 * request.
 */
static int
vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct sglist *sg;
	struct virtqueue *vq;
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int readable, writable, error;

	sg = sc->vtscsi_sglist;
	vq = sc->vtscsi_request_vq;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	vtscsi_init_scsi_cmd_req(sc, csio, cmd_req);

	error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
	if (error)
		return (error);

	req->vsr_complete = vtscsi_complete_scsi_cmd;
	/* Preset the response to a value no valid completion uses. */
	cmd_resp->response = -1;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);

		ccbh->status = CAM_REQUEUE_REQ;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
		return (error);
	}

	ccbh->status |= CAM_SIM_QUEUED;
	/* Link the CCB back to its request for abort handling. */
	ccbh->ccbh_vtscsi_req = req;

	virtqueue_notify(vq);

	if (ccbh->timeout != CAM_TIME_INFINITY) {
		req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
		callout_reset_sbt(&req->vsr_callout, SBT_1MS * ccbh->timeout,
		    0, vtscsi_timedout_scsi_cmd, req, 0);
	}

	vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
	    req, ccbh);

	return (0);
}
1154 
1155 static int
1156 vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
1157 {
1158 	struct vtscsi_request *req;
1159 	int error;
1160 
1161 	req = vtscsi_dequeue_request(sc);
1162 	if (req == NULL) {
1163 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
1164 		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
1165 		return (ENOBUFS);
1166 	}
1167 
1168 	req->vsr_ccb = ccb;
1169 
1170 	error = vtscsi_execute_scsi_cmd(sc, req);
1171 	if (error)
1172 		vtscsi_enqueue_request(sc, req);
1173 
1174 	return (error);
1175 }
1176 
/*
 * Completion handler for the ABORT_TASK TMF issued on behalf of a timed
 * out SCSI command. If the abort did not complete, and the device is
 * neither detaching nor already mid-reset, fall back to a full bus
 * reset to recover the command.
 */
static void
vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	struct vtscsi_request *to_req;
	uint8_t response;

	tmf_resp = &req->vsr_tmf_resp;
	response = tmf_resp->response;
	to_req = req->vsr_timedout_req;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
	    req, to_req, response);

	/* The TMF request itself is done with; recycle it. */
	vtscsi_enqueue_request(sc, req);

	/*
	 * The timedout request could have completed between when the
	 * abort task was sent and when the host processed it.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
		return;

	/* The timedout request was successfully aborted. */
	if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
		return;

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/* The timedout request will be aborted by the reset. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
		return;

	vtscsi_reset_bus(sc);
}
1215 
/*
 * Issue an asynchronous VIRTIO_SCSI_T_TMF_ABORT_TASK control request
 * for a timed out SCSI command; the TMF response is handled by
 * vtscsi_complete_abort_timedout_scsi_cmd(). Returns zero if the TMF
 * was enqueued, otherwise an error (e.g. ENOBUFS when no free request
 * is available).
 */
static int
vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *to_req)
{
	struct sglist *sg;
	struct ccb_hdr *to_ccbh;
	struct vtscsi_request *req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	to_ccbh = &to_req->vsr_ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = ENOBUFS;
		goto fail;
	}

	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* The timedout CCB pointer doubles as the task tag to abort. */
	vtscsi_init_ctrl_tmf_req(sc, to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) to_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_timedout_req = to_req;
	req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);
	if (error == 0)
		return (0);

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
	    "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);

	return (error);
}
1263 
/*
 * Callout handler for a SCSI command that did not complete within the
 * CCB's timeout. Tries, in order: to observe that the command really
 * finished (raced with completion), to abort it with a TMF, and as a
 * last resort to reset the bus. Runs with the softc mutex held (the
 * callout was initialized with callout_init_mtx()).
 */
static void
vtscsi_timedout_scsi_cmd(void *xreq)
{
	struct vtscsi_softc *sc;
	struct vtscsi_request *to_req;

	to_req = xreq;
	sc = to_req->vsr_softc;

	vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
	    to_req, to_req->vsr_ccb, to_req->vsr_state);

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/*
	 * Bail if the request is not in use. We likely raced when
	 * stopping the callout handler or it has already been aborted.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
	    (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
		return;

	/*
	 * Complete the request queue in case the timedout request is
	 * actually just pending.
	 */
	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
		return;

	sc->vtscsi_stats.scsi_cmd_timeouts++;
	to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;

	if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
		return;

	vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
	vtscsi_reset_bus(sc);
}
1305 
1306 static cam_status
1307 vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
1308 {
1309 	cam_status status;
1310 
1311 	switch (cmd_resp->response) {
1312 	case VIRTIO_SCSI_S_OK:
1313 		status = CAM_REQ_CMP;
1314 		break;
1315 	case VIRTIO_SCSI_S_OVERRUN:
1316 		status = CAM_DATA_RUN_ERR;
1317 		break;
1318 	case VIRTIO_SCSI_S_ABORTED:
1319 		status = CAM_REQ_ABORTED;
1320 		break;
1321 	case VIRTIO_SCSI_S_BAD_TARGET:
1322 		status = CAM_SEL_TIMEOUT;
1323 		break;
1324 	case VIRTIO_SCSI_S_RESET:
1325 		status = CAM_SCSI_BUS_RESET;
1326 		break;
1327 	case VIRTIO_SCSI_S_BUSY:
1328 		status = CAM_SCSI_BUSY;
1329 		break;
1330 	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
1331 	case VIRTIO_SCSI_S_TARGET_FAILURE:
1332 	case VIRTIO_SCSI_S_NEXUS_FAILURE:
1333 		status = CAM_SCSI_IT_NEXUS_LOST;
1334 		break;
1335 	default: /* VIRTIO_SCSI_S_FAILURE */
1336 		status = CAM_REQ_CMP_ERR;
1337 		break;
1338 	}
1339 
1340 	return (status);
1341 }
1342 
/*
 * Translate a successful VirtIO completion into CAM terms: copy out the
 * SCSI status and data residual, and latch any autosense data returned
 * by the host. Returns CAM_REQ_CMP for SCSI_STATUS_OK, otherwise
 * CAM_SCSI_STATUS_ERROR; CAM_AUTOSNS_VALID is ORed in whenever sense
 * data was provided.
 */
static cam_status
vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
    struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
{
	uint32_t resp_sense_length;
	cam_status status;

	csio->scsi_status = cmd_resp->status;
	csio->resid = vtscsi_htog32(sc, cmd_resp->resid);

	if (csio->scsi_status == SCSI_STATUS_OK)
		status = CAM_REQ_CMP;
	else
		status = CAM_SCSI_STATUS_ERROR;

	resp_sense_length = vtscsi_htog32(sc, cmd_resp->sense_len);

	if (resp_sense_length > 0) {
		status |= CAM_AUTOSNS_VALID;

		if (resp_sense_length < csio->sense_len)
			csio->sense_resid = csio->sense_len - resp_sense_length;
		else
			csio->sense_resid = 0;

		/*
		 * sense_len - sense_resid is min(resp_sense_length,
		 * csio->sense_len): copy only what fits in the CCB.
		 */
		memcpy(&csio->sense_data, cmd_resp->sense,
		    csio->sense_len - csio->sense_resid);
	}

	vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
	    "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
	    csio, csio->scsi_status, csio->resid, csio->sense_resid);

	return (status);
}
1378 
/*
 * Completion handler for a SCSI command request: stop any pending
 * timeout callout, convert the VirtIO response into a CAM status
 * (reporting an aborted, timed out request as CAM_CMD_TIMEOUT), freeze
 * the device queue on error, thaw the SIMQ if possible, complete the
 * CCB, and recycle the request.
 */
static void
vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_resp *cmd_resp;
	cam_status status;

	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_resp = &req->vsr_cmd_resp;

	KASSERT(ccbh->ccbh_vtscsi_req == req,
	    ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&req->vsr_callout);

	status = vtscsi_scsi_cmd_cam_status(cmd_resp);
	if (status == CAM_REQ_ABORTED) {
		/* An abort we issued for a timeout reads as a timeout. */
		if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
			status = CAM_CMD_TIMEOUT;
	} else if (status == CAM_REQ_CMP)
		status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccbh->path, 1);
	}

	/* A virtqueue slot and a request just became available. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		status |= CAM_RELEASE_SIMQ;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
	    req, ccbh, status);

	ccbh->status = status;
	xpt_done(req->vsr_ccb);
	vtscsi_enqueue_request(sc, req);
}
1419 
1420 static void
1421 vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
1422 {
1423 
1424 	/* XXX We probably shouldn't poll forever. */
1425 	req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
1426 	do
1427 		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1428 	while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);
1429 
1430 	req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
1431 }
1432 
1433 static int
1434 vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
1435     struct sglist *sg, int readable, int writable, int flag)
1436 {
1437 	struct virtqueue *vq;
1438 	int error;
1439 
1440 	vq = sc->vtscsi_control_vq;
1441 
1442 	MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);
1443 
1444 	error = virtqueue_enqueue(vq, req, sg, readable, writable);
1445 	if (error) {
1446 		/*
1447 		 * Return EAGAIN when the virtqueue does not have enough
1448 		 * descriptors available.
1449 		 */
1450 		if (error == ENOSPC || error == EMSGSIZE)
1451 			error = EAGAIN;
1452 
1453 		return (error);
1454 	}
1455 
1456 	virtqueue_notify(vq);
1457 	if (flag == VTSCSI_EXECUTE_POLL)
1458 		vtscsi_poll_ctrl_req(sc, req);
1459 
1460 	return (0);
1461 }
1462 
1463 static void
1464 vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
1465     struct vtscsi_request *req)
1466 {
1467 	union ccb *ccb;
1468 	struct ccb_hdr *ccbh;
1469 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1470 
1471 	ccb = req->vsr_ccb;
1472 	ccbh = &ccb->ccb_h;
1473 	tmf_resp = &req->vsr_tmf_resp;
1474 
1475 	switch (tmf_resp->response) {
1476 	case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
1477 		ccbh->status = CAM_REQ_CMP;
1478 		break;
1479 	case VIRTIO_SCSI_S_FUNCTION_REJECTED:
1480 		ccbh->status = CAM_UA_ABORT;
1481 		break;
1482 	default:
1483 		ccbh->status = CAM_REQ_CMP_ERR;
1484 		break;
1485 	}
1486 
1487 	xpt_done(ccb);
1488 	vtscsi_enqueue_request(sc, req);
1489 }
1490 
/*
 * Handle an XPT_ABORT CCB by issuing an asynchronous ABORT_TASK TMF for
 * the referenced SCSI I/O. Fails with EINVAL when the CCB to abort is
 * not a SCSI I/O, or EALREADY when that request is no longer in-flight.
 */
static int
vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_abort *cab;
	struct ccb_hdr *ccbh;
	struct ccb_hdr *abort_ccbh;
	struct vtscsi_request *abort_req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	cab = &req->vsr_ccb->cab;
	ccbh = &cab->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* CCB header and request that's to be aborted. */
	abort_ccbh = &cab->abort_ccb->ccb_h;
	abort_req = abort_ccbh->ccbh_vtscsi_req;

	if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
		error = EINVAL;
		goto fail;
	}

	/* Only attempt to abort requests that could be in-flight. */
	if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
		error = EALREADY;
		goto fail;
	}

	/* Mark it aborted and cancel any pending timeout callout. */
	abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
	if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&abort_req->vsr_callout);

	/* The CCB pointer serves as the task tag to abort. */
	vtscsi_init_ctrl_tmf_req(sc, ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) abort_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_abort_task_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

fail:
	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
	    "abort_req=%p\n", error, req, abort_ccbh, abort_req);

	return (error);
}
1548 
1549 static void
1550 vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
1551     struct vtscsi_request *req)
1552 {
1553 	union ccb *ccb;
1554 	struct ccb_hdr *ccbh;
1555 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1556 
1557 	ccb = req->vsr_ccb;
1558 	ccbh = &ccb->ccb_h;
1559 	tmf_resp = &req->vsr_tmf_resp;
1560 
1561 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
1562 	    req, ccb, tmf_resp->response);
1563 
1564 	if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
1565 		ccbh->status = CAM_REQ_CMP;
1566 		vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
1567 		    ccbh->target_lun);
1568 	} else
1569 		ccbh->status = CAM_REQ_CMP_ERR;
1570 
1571 	xpt_done(ccb);
1572 	vtscsi_enqueue_request(sc, req);
1573 }
1574 
/*
 * Handle an XPT_RESET_DEV CCB with an asynchronous TMF: a wildcard LUN
 * becomes an I_T nexus (whole target) reset, otherwise a logical unit
 * reset is issued. Completion is handled by
 * vtscsi_complete_reset_dev_cmd().
 */
static int
vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_resetdev *crd;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	uint32_t subtype;
	int error;

	sg = sc->vtscsi_sglist;
	crd = &req->vsr_ccb->crd;
	ccbh = &crd->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	if (ccbh->target_lun == CAM_LUN_WILDCARD)
		subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
	else
		subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;

	vtscsi_init_ctrl_tmf_req(sc, ccbh, subtype, 0, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_reset_dev_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
	    error, req, ccbh);

	return (error);
}
1615 
1616 static void
1617 vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
1618 {
1619 
1620 	*target_id = lun[1];
1621 	*lun_id = (lun[2] << 8) | lun[3];
1622 }
1623 
1624 static void
1625 vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
1626 {
1627 
1628 	lun[0] = 1;
1629 	lun[1] = ccbh->target_id;
1630 	lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
1631 	lun[3] = ccbh->target_lun & 0xFF;
1632 }
1633 
1634 static void
1635 vtscsi_init_scsi_cmd_req(struct vtscsi_softc *sc, struct ccb_scsiio *csio,
1636     struct virtio_scsi_cmd_req *cmd_req)
1637 {
1638 	uint8_t attr;
1639 
1640 	switch (csio->tag_action) {
1641 	case MSG_HEAD_OF_Q_TAG:
1642 		attr = VIRTIO_SCSI_S_HEAD;
1643 		break;
1644 	case MSG_ORDERED_Q_TAG:
1645 		attr = VIRTIO_SCSI_S_ORDERED;
1646 		break;
1647 	case MSG_ACA_TASK:
1648 		attr = VIRTIO_SCSI_S_ACA;
1649 		break;
1650 	default: /* MSG_SIMPLE_Q_TAG */
1651 		attr = VIRTIO_SCSI_S_SIMPLE;
1652 		break;
1653 	}
1654 
1655 	vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
1656 	cmd_req->tag = vtscsi_gtoh64(sc, (uintptr_t) csio);
1657 	cmd_req->task_attr = attr;
1658 
1659 	memcpy(cmd_req->cdb,
1660 	    csio->ccb_h.flags & CAM_CDB_POINTER ?
1661 	        csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
1662 	    csio->cdb_len);
1663 }
1664 
1665 static void
1666 vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *sc, struct ccb_hdr *ccbh,
1667     uint32_t subtype, uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
1668 {
1669 
1670 	vtscsi_set_request_lun(ccbh, tmf_req->lun);
1671 
1672 	tmf_req->type = vtscsi_gtoh32(sc, VIRTIO_SCSI_T_TMF);
1673 	tmf_req->subtype = vtscsi_gtoh32(sc, subtype);
1674 	tmf_req->tag = vtscsi_gtoh64(sc, tag);
1675 }
1676 
1677 static void
1678 vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
1679 {
1680 	int frozen;
1681 
1682 	frozen = sc->vtscsi_frozen;
1683 
1684 	if (reason & VTSCSI_REQUEST &&
1685 	    (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
1686 		sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;
1687 
1688 	if (reason & VTSCSI_REQUEST_VQ &&
1689 	    (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
1690 		sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;
1691 
1692 	/* Freeze the SIMQ if transitioned to frozen. */
1693 	if (frozen == 0 && sc->vtscsi_frozen != 0) {
1694 		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
1695 		xpt_freeze_simq(sc->vtscsi_sim, 1);
1696 	}
1697 }
1698 
1699 static int
1700 vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
1701 {
1702 	int thawed;
1703 
1704 	if (sc->vtscsi_frozen == 0 || reason == 0)
1705 		return (0);
1706 
1707 	if (reason & VTSCSI_REQUEST &&
1708 	    sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
1709 		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;
1710 
1711 	if (reason & VTSCSI_REQUEST_VQ &&
1712 	    sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
1713 		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;
1714 
1715 	thawed = sc->vtscsi_frozen == 0;
1716 	if (thawed != 0)
1717 		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");
1718 
1719 	return (thawed);
1720 }
1721 
1722 static void
1723 vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
1724     target_id_t target_id, lun_id_t lun_id)
1725 {
1726 	struct cam_path *path;
1727 
1728 	/* Use the wildcard path from our softc for bus announcements. */
1729 	if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
1730 		xpt_async(ac_code, sc->vtscsi_path, NULL);
1731 		return;
1732 	}
1733 
1734 	if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
1735 	    target_id, lun_id) != CAM_REQ_CMP) {
1736 		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
1737 		return;
1738 	}
1739 
1740 	xpt_async(ac_code, path, NULL);
1741 	xpt_free_path(path);
1742 }
1743 
1744 static void
1745 vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
1746     lun_id_t lun_id)
1747 {
1748 	union ccb *ccb;
1749 	cam_status status;
1750 
1751 	ccb = xpt_alloc_ccb_nowait();
1752 	if (ccb == NULL) {
1753 		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
1754 		return;
1755 	}
1756 
1757 	status = xpt_create_path(&ccb->ccb_h.path, NULL,
1758 	    cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
1759 	if (status != CAM_REQ_CMP) {
1760 		xpt_free_ccb(ccb);
1761 		return;
1762 	}
1763 
1764 	xpt_rescan(ccb);
1765 }
1766 
1767 static void
1768 vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
1769 {
1770 
1771 	vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
1772 }
1773 
1774 static void
1775 vtscsi_transport_reset_event(struct vtscsi_softc *sc,
1776     struct virtio_scsi_event *event)
1777 {
1778 	target_id_t target_id;
1779 	lun_id_t lun_id;
1780 
1781 	vtscsi_get_request_lun(event->lun, &target_id, &lun_id);
1782 
1783 	switch (event->reason) {
1784 	case VIRTIO_SCSI_EVT_RESET_RESCAN:
1785 	case VIRTIO_SCSI_EVT_RESET_REMOVED:
1786 		vtscsi_execute_rescan(sc, target_id, lun_id);
1787 		break;
1788 	default:
1789 		device_printf(sc->vtscsi_dev,
1790 		    "unhandled transport event reason: %d\n", event->reason);
1791 		break;
1792 	}
1793 }
1794 
1795 static void
1796 vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
1797 {
1798 	int error;
1799 
1800 	if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
1801 		switch (event->event) {
1802 		case VIRTIO_SCSI_T_TRANSPORT_RESET:
1803 			vtscsi_transport_reset_event(sc, event);
1804 			break;
1805 		default:
1806 			device_printf(sc->vtscsi_dev,
1807 			    "unhandled event: %d\n", event->event);
1808 			break;
1809 		}
1810 	} else
1811 		vtscsi_execute_rescan_bus(sc);
1812 
1813 	/*
1814 	 * This should always be successful since the buffer
1815 	 * was just dequeued.
1816 	 */
1817 	error = vtscsi_enqueue_event_buf(sc, event);
1818 	KASSERT(error == 0,
1819 	    ("cannot requeue event buffer: %d", error));
1820 }
1821 
1822 static int
1823 vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
1824     struct virtio_scsi_event *event)
1825 {
1826 	struct sglist *sg;
1827 	struct virtqueue *vq;
1828 	int size, error;
1829 
1830 	sg = sc->vtscsi_sglist;
1831 	vq = sc->vtscsi_event_vq;
1832 	size = sc->vtscsi_event_buf_size;
1833 
1834 	bzero(event, size);
1835 
1836 	sglist_reset(sg);
1837 	error = sglist_append(sg, event, size);
1838 	if (error)
1839 		return (error);
1840 
1841 	error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
1842 	if (error)
1843 		return (error);
1844 
1845 	virtqueue_notify(vq);
1846 
1847 	return (0);
1848 }
1849 
/*
 * Post the event buffers to the event virtqueue at attach time.
 * Returns zero if at least one buffer was posted (or posting was
 * skipped entirely), otherwise the enqueue error.
 */
static int
vtscsi_init_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, size, error;

	/*
	 * The first release of QEMU with VirtIO SCSI support would crash
	 * when attempting to notify the event virtqueue. This was fixed
	 * when hotplug support was added.
	 */
	if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
		size = sc->vtscsi_event_buf_size;
	else
		size = 0;	/* Forces the check below to skip posting. */

	if (size < sizeof(struct virtio_scsi_event))
		return (0);

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/*
	 * Even just one buffer is enough. Missed events are
	 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
	 */
	if (i > 0)
		error = 0;

	return (error);
}
1886 
/*
 * Repost the event buffers after the device has been reinitialized
 * (e.g. following a bus reset). Nothing to do when the event virtqueue
 * was never populated (no hotplug support or undersized buffers).
 */
static void
vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, error;

	if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
	    sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
		return;

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/* As at init time, a single posted buffer is sufficient. */
	KASSERT(i > 0, ("cannot reinit event vq: %d", error));
}
1907 
1908 static void
1909 vtscsi_drain_event_vq(struct vtscsi_softc *sc)
1910 {
1911 	struct virtqueue *vq;
1912 	int last;
1913 
1914 	vq = sc->vtscsi_event_vq;
1915 	last = 0;
1916 
1917 	while (virtqueue_drain(vq, &last) != NULL)
1918 		;
1919 
1920 	KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
1921 }
1922 
1923 static void
1924 vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
1925 {
1926 
1927 	VTSCSI_LOCK_OWNED(sc);
1928 
1929 	if (sc->vtscsi_request_vq != NULL)
1930 		vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
1931 	if (sc->vtscsi_control_vq != NULL)
1932 		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1933 }
1934 
/*
 * Locked wrapper around vtscsi_complete_vqs_locked().
 */
static void
vtscsi_complete_vqs(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);
	vtscsi_complete_vqs_locked(sc);
	VTSCSI_UNLOCK(sc);
}
1943 
/*
 * Complete a request pulled off a virtqueue during a drain. When
 * detaching, the CCB is failed with CAM_NO_HBA; during a bus reset it
 * is returned with CAM_REQUEUE_REQ so CAM retries it. In both cases
 * the request returns to the free list.
 */
static void
vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	union ccb *ccb;
	int detach;

	ccb = req->vsr_ccb;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);

	/*
	 * The callout must be drained when detaching since the request is
	 * about to be freed. The VTSCSI_MTX must not be held for this in
	 * case the callout is pending because there is a deadlock potential.
	 * Otherwise, the virtqueue is being drained because of a bus reset
	 * so we only need to attempt to stop the callouts.
	 */
	detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
	if (detach != 0)
		VTSCSI_LOCK_NOTOWNED(sc);
	else
		VTSCSI_LOCK_OWNED(sc);

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
		if (detach != 0)
			callout_drain(&req->vsr_callout);
		else
			callout_stop(&req->vsr_callout);
	}

	if (ccb != NULL) {
		/* xpt_done() expects the lock held in the detach case. */
		if (detach != 0) {
			VTSCSI_LOCK(sc);
			ccb->ccb_h.status = CAM_NO_HBA;
		} else
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		if (detach != 0)
			VTSCSI_UNLOCK(sc);
	}

	vtscsi_enqueue_request(sc, req);
}
1987 
1988 static void
1989 vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
1990 {
1991 	struct vtscsi_request *req;
1992 	int last;
1993 
1994 	last = 0;
1995 
1996 	vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);
1997 
1998 	while ((req = virtqueue_drain(vq, &last)) != NULL)
1999 		vtscsi_cancel_request(sc, req);
2000 
2001 	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
2002 }
2003 
2004 static void
2005 vtscsi_drain_vqs(struct vtscsi_softc *sc)
2006 {
2007 
2008 	if (sc->vtscsi_control_vq != NULL)
2009 		vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
2010 	if (sc->vtscsi_request_vq != NULL)
2011 		vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
2012 	if (sc->vtscsi_event_vq != NULL)
2013 		vtscsi_drain_event_vq(sc);
2014 }
2015 
2016 static void
2017 vtscsi_stop(struct vtscsi_softc *sc)
2018 {
2019 
2020 	vtscsi_disable_vqs_intr(sc);
2021 	virtio_stop(sc->vtscsi_dev);
2022 }
2023 
/*
 * Reset the emulated SCSI bus: stop the device, complete and drain all
 * outstanding requests, thaw the SIMQ, and reinitialize the device.
 * Called with the softc mutex held. Honours the
 * vtscsi_bus_reset_disable tunable. Returns the vtscsi_reinit() error,
 * or zero.
 */
static int
vtscsi_reset_bus(struct vtscsi_softc *sc)
{
	int error;

	VTSCSI_LOCK_OWNED(sc);

	if (vtscsi_bus_reset_disable != 0) {
		device_printf(sc->vtscsi_dev, "bus reset disabled\n");
		return (0);
	}

	/* Cleared at the end; checked by the timeout/abort paths. */
	sc->vtscsi_flags |= VTSCSI_FLAG_RESET;

	/*
	 * vtscsi_stop() will cause the in-flight requests to be canceled.
	 * Those requests are then completed here so CAM will retry them
	 * after the reset is complete.
	 */
	vtscsi_stop(sc);
	vtscsi_complete_vqs_locked(sc);

	/* Rid the virtqueues of any remaining requests. */
	vtscsi_drain_vqs(sc);

	/*
	 * Any resource shortage that froze the SIMQ cannot persist across
	 * a bus reset so ensure it gets thawed here.
	 */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		xpt_release_simq(sc->vtscsi_sim, 0);

	error = vtscsi_reinit(sc);
	if (error) {
		device_printf(sc->vtscsi_dev,
		    "reinitialization failed, stopping device...\n");
		vtscsi_stop(sc);
	} else
		vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
		    CAM_LUN_WILDCARD);

	sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;

	return (error);
}
2069 
/*
 * One-time initialization of a request: record the owning softc and set
 * up the timeout callout under the softc mutex. With INVARIANTS, verify
 * that the request and response unions each fit in a single sglist
 * segment (i.e. do not cross a page boundary).
 */
static void
vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

#ifdef INVARIANTS
	int req_nsegs, resp_nsegs;

	req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
	resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));

	KASSERT(req_nsegs == 1, ("request crossed page boundary"));
	KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
#endif

	req->vsr_softc = sc;
	callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
}
2087 
/*
 * Allocate the pool of requests and place them on the free list.
 * Returns ENOMEM if any allocation fails.
 */
static int
vtscsi_alloc_requests(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;
	int i, nreqs;

	/*
	 * Commands destined for either the request or control queues come
	 * from the same SIM queue. Use the size of the request virtqueue
	 * as it (should) be much more frequently used. Some additional
	 * requests are allocated for internal (TMF) use.
	 */
	nreqs = virtqueue_size(sc->vtscsi_request_vq);
	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
		nreqs /= VTSCSI_MIN_SEGMENTS;
	nreqs += VTSCSI_RESERVED_REQUESTS;

	for (i = 0; i < nreqs; i++) {
		req = malloc(sizeof(struct vtscsi_request), M_DEVBUF,
		    M_NOWAIT);
		if (req == NULL)
			return (ENOMEM);
			/*
			 * NOTE(review): requests allocated so far stay on the
			 * free list; presumably the caller's error path frees
			 * them via vtscsi_free_requests() -- confirm.
			 */

		vtscsi_init_request(sc, req);

		sc->vtscsi_nrequests++;
		vtscsi_enqueue_request(sc, req);
	}

	return (0);
}
2119 
2120 static void
2121 vtscsi_free_requests(struct vtscsi_softc *sc)
2122 {
2123 	struct vtscsi_request *req;
2124 
2125 	while ((req = vtscsi_dequeue_request(sc)) != NULL) {
2126 		KASSERT(callout_active(&req->vsr_callout) == 0,
2127 		    ("request callout still active"));
2128 
2129 		sc->vtscsi_nrequests--;
2130 		free(req, M_DEVBUF);
2131 	}
2132 
2133 	KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
2134 	    sc->vtscsi_nrequests));
2135 }
2136 
/*
 * Return a request to the free list, resetting it to a pristine state.
 * Also thaws the SIMQ if it had been frozen for lack of free requests.
 */
static void
vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	KASSERT(req->vsr_softc == sc,
	    ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

	/* A request is available so the SIMQ could be released. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
		xpt_release_simq(sc->vtscsi_sim, 1);

	req->vsr_ccb = NULL;
	req->vsr_complete = NULL;
	req->vsr_ptr0 = NULL;
	req->vsr_state = VTSCSI_REQ_STATE_FREE;
	req->vsr_flags = 0;

	bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
	bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));

	/*
	 * We insert at the tail of the queue in order to make it
	 * very unlikely a request will be reused if we race with
	 * stopping its callout handler.
	 */
	TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
}
2166 
2167 static struct vtscsi_request *
2168 vtscsi_dequeue_request(struct vtscsi_softc *sc)
2169 {
2170 	struct vtscsi_request *req;
2171 
2172 	req = TAILQ_FIRST(&sc->vtscsi_req_free);
2173 	if (req != NULL) {
2174 		req->vsr_state = VTSCSI_REQ_STATE_INUSE;
2175 		TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
2176 	} else
2177 		sc->vtscsi_stats.dequeue_no_requests++;
2178 
2179 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
2180 
2181 	return (req);
2182 }
2183 
2184 static void
2185 vtscsi_complete_request(struct vtscsi_request *req)
2186 {
2187 
2188 	if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
2189 		req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;
2190 
2191 	if (req->vsr_complete != NULL)
2192 		req->vsr_complete(req->vsr_softc, req);
2193 }
2194 
2195 static void
2196 vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
2197 {
2198 	struct vtscsi_request *req;
2199 
2200 	VTSCSI_LOCK_OWNED(sc);
2201 
2202 	while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
2203 		vtscsi_complete_request(req);
2204 }
2205 
2206 static void
2207 vtscsi_control_vq_intr(void *xsc)
2208 {
2209 	struct vtscsi_softc *sc;
2210 	struct virtqueue *vq;
2211 
2212 	sc = xsc;
2213 	vq = sc->vtscsi_control_vq;
2214 
2215 again:
2216 	VTSCSI_LOCK(sc);
2217 
2218 	vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
2219 
2220 	if (virtqueue_enable_intr(vq) != 0) {
2221 		virtqueue_disable_intr(vq);
2222 		VTSCSI_UNLOCK(sc);
2223 		goto again;
2224 	}
2225 
2226 	VTSCSI_UNLOCK(sc);
2227 }
2228 
2229 static void
2230 vtscsi_event_vq_intr(void *xsc)
2231 {
2232 	struct vtscsi_softc *sc;
2233 	struct virtqueue *vq;
2234 	struct virtio_scsi_event *event;
2235 
2236 	sc = xsc;
2237 	vq = sc->vtscsi_event_vq;
2238 
2239 again:
2240 	VTSCSI_LOCK(sc);
2241 
2242 	while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
2243 		vtscsi_handle_event(sc, event);
2244 
2245 	if (virtqueue_enable_intr(vq) != 0) {
2246 		virtqueue_disable_intr(vq);
2247 		VTSCSI_UNLOCK(sc);
2248 		goto again;
2249 	}
2250 
2251 	VTSCSI_UNLOCK(sc);
2252 }
2253 
2254 static void
2255 vtscsi_request_vq_intr(void *xsc)
2256 {
2257 	struct vtscsi_softc *sc;
2258 	struct virtqueue *vq;
2259 
2260 	sc = xsc;
2261 	vq = sc->vtscsi_request_vq;
2262 
2263 again:
2264 	VTSCSI_LOCK(sc);
2265 
2266 	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
2267 
2268 	if (virtqueue_enable_intr(vq) != 0) {
2269 		virtqueue_disable_intr(vq);
2270 		VTSCSI_UNLOCK(sc);
2271 		goto again;
2272 	}
2273 
2274 	VTSCSI_UNLOCK(sc);
2275 }
2276 
2277 static void
2278 vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
2279 {
2280 
2281 	virtqueue_disable_intr(sc->vtscsi_control_vq);
2282 	virtqueue_disable_intr(sc->vtscsi_event_vq);
2283 	virtqueue_disable_intr(sc->vtscsi_request_vq);
2284 }
2285 
2286 static void
2287 vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
2288 {
2289 
2290 	virtqueue_enable_intr(sc->vtscsi_control_vq);
2291 	virtqueue_enable_intr(sc->vtscsi_event_vq);
2292 	virtqueue_enable_intr(sc->vtscsi_request_vq);
2293 }
2294 
2295 static void
2296 vtscsi_get_tunables(struct vtscsi_softc *sc)
2297 {
2298 	char tmpstr[64];
2299 
2300 	TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);
2301 
2302 	snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
2303 	    device_get_unit(sc->vtscsi_dev));
2304 	TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
2305 }
2306 
2307 static void
2308 vtscsi_setup_sysctl(struct vtscsi_softc *sc)
2309 {
2310 	device_t dev;
2311 	struct vtscsi_statistics *stats;
2312         struct sysctl_ctx_list *ctx;
2313 	struct sysctl_oid *tree;
2314 	struct sysctl_oid_list *child;
2315 
2316 	dev = sc->vtscsi_dev;
2317 	stats = &sc->vtscsi_stats;
2318 	ctx = device_get_sysctl_ctx(dev);
2319 	tree = device_get_sysctl_tree(dev);
2320 	child = SYSCTL_CHILDREN(tree);
2321 
2322 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
2323 	    CTLFLAG_RW, &sc->vtscsi_debug, 0,
2324 	    "Debug level");
2325 
2326 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
2327 	    CTLFLAG_RD, &stats->scsi_cmd_timeouts,
2328 	    "SCSI command timeouts");
2329 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
2330 	    CTLFLAG_RD, &stats->dequeue_no_requests,
2331 	    "No available requests to dequeue");
2332 }
2333 
2334 static void
2335 vtscsi_printf_req(struct vtscsi_request *req, const char *func,
2336     const char *fmt, ...)
2337 {
2338 	struct vtscsi_softc *sc;
2339 	union ccb *ccb;
2340 	struct sbuf sb;
2341 	va_list ap;
2342 	char str[192];
2343 	char path_str[64];
2344 
2345 	if (req == NULL)
2346 		return;
2347 
2348 	sc = req->vsr_softc;
2349 	ccb = req->vsr_ccb;
2350 
2351 	va_start(ap, fmt);
2352 	sbuf_new(&sb, str, sizeof(str), 0);
2353 
2354 	if (ccb == NULL) {
2355 		sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
2356 		    cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
2357 		    cam_sim_bus(sc->vtscsi_sim));
2358 	} else {
2359 		xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
2360 		sbuf_cat(&sb, path_str);
2361 		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
2362 			scsi_command_string(&ccb->csio, &sb);
2363 			sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
2364 		}
2365 	}
2366 
2367 	sbuf_vprintf(&sb, fmt, ap);
2368 	va_end(ap);
2369 
2370 	sbuf_finish(&sb);
2371 	printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
2372 	    sbuf_data(&sb));
2373 }
2374