/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO SCSI devices.
*/ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/kernel.h> 37 #include <sys/kthread.h> 38 #include <sys/malloc.h> 39 #include <sys/module.h> 40 #include <sys/sglist.h> 41 #include <sys/sysctl.h> 42 #include <sys/lock.h> 43 #include <sys/mutex.h> 44 #include <sys/callout.h> 45 #include <sys/queue.h> 46 #include <sys/sbuf.h> 47 48 #include <machine/stdarg.h> 49 50 #include <machine/bus.h> 51 #include <machine/resource.h> 52 #include <sys/bus.h> 53 #include <sys/rman.h> 54 55 #include <cam/cam.h> 56 #include <cam/cam_ccb.h> 57 #include <cam/cam_sim.h> 58 #include <cam/cam_periph.h> 59 #include <cam/cam_xpt_sim.h> 60 #include <cam/cam_debug.h> 61 #include <cam/scsi/scsi_all.h> 62 #include <cam/scsi/scsi_message.h> 63 64 #include <dev/virtio/virtio.h> 65 #include <dev/virtio/virtqueue.h> 66 #include <dev/virtio/scsi/virtio_scsi.h> 67 #include <dev/virtio/scsi/virtio_scsivar.h> 68 69 #include "virtio_if.h" 70 71 static int vtscsi_modevent(module_t, int, void *); 72 73 static int vtscsi_probe(device_t); 74 static int vtscsi_attach(device_t); 75 static int vtscsi_detach(device_t); 76 static int vtscsi_suspend(device_t); 77 static int vtscsi_resume(device_t); 78 79 static void vtscsi_negotiate_features(struct vtscsi_softc *); 80 static void vtscsi_read_config(struct vtscsi_softc *, 81 struct virtio_scsi_config *); 82 static int vtscsi_maximum_segments(struct vtscsi_softc *, int); 83 static int vtscsi_alloc_virtqueues(struct vtscsi_softc *); 84 static void vtscsi_write_device_config(struct vtscsi_softc *); 85 static int vtscsi_reinit(struct vtscsi_softc *); 86 87 static int vtscsi_alloc_cam(struct vtscsi_softc *); 88 static int vtscsi_register_cam(struct vtscsi_softc *); 89 static void vtscsi_free_cam(struct vtscsi_softc *); 90 static void vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *); 91 static int vtscsi_register_async(struct vtscsi_softc *); 92 static void 
vtscsi_deregister_async(struct vtscsi_softc *); 93 static void vtscsi_cam_action(struct cam_sim *, union ccb *); 94 static void vtscsi_cam_poll(struct cam_sim *); 95 96 static void vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *, 97 union ccb *); 98 static void vtscsi_cam_get_tran_settings(struct vtscsi_softc *, 99 union ccb *); 100 static void vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *); 101 static void vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *); 102 static void vtscsi_cam_abort(struct vtscsi_softc *, union ccb *); 103 static void vtscsi_cam_path_inquiry(struct vtscsi_softc *, 104 struct cam_sim *, union ccb *); 105 106 static int vtscsi_sg_append_scsi_buf(struct vtscsi_softc *, 107 struct sglist *, struct ccb_scsiio *); 108 static int vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *, 109 struct vtscsi_request *, int *, int *); 110 static int vtscsi_execute_scsi_cmd(struct vtscsi_softc *, 111 struct vtscsi_request *); 112 static int vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *); 113 static void vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *, 114 struct vtscsi_request *); 115 static int vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *, 116 struct vtscsi_request *); 117 static void vtscsi_timedout_scsi_cmd(void *); 118 static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *); 119 static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *, 120 struct ccb_scsiio *, struct virtio_scsi_cmd_resp *); 121 static void vtscsi_complete_scsi_cmd(struct vtscsi_softc *, 122 struct vtscsi_request *); 123 124 static void vtscsi_poll_ctrl_req(struct vtscsi_softc *, 125 struct vtscsi_request *); 126 static int vtscsi_execute_ctrl_req(struct vtscsi_softc *, 127 struct vtscsi_request *, struct sglist *, int, int, int); 128 static void vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c, 129 struct vtscsi_request *); 130 static int vtscsi_execute_abort_task_cmd(struct vtscsi_softc 
*, 131 struct vtscsi_request *); 132 static int vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *, 133 struct vtscsi_request *); 134 135 static void vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *); 136 static void vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []); 137 static void vtscsi_init_scsi_cmd_req(struct ccb_scsiio *, 138 struct virtio_scsi_cmd_req *); 139 static void vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t, 140 uintptr_t, struct virtio_scsi_ctrl_tmf_req *); 141 142 static void vtscsi_freeze_simq(struct vtscsi_softc *, int); 143 static int vtscsi_thaw_simq(struct vtscsi_softc *, int); 144 145 static void vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t, 146 lun_id_t); 147 static void vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t, 148 lun_id_t); 149 static void vtscsi_execute_rescan_bus(struct vtscsi_softc *); 150 151 static void vtscsi_handle_event(struct vtscsi_softc *, 152 struct virtio_scsi_event *); 153 static int vtscsi_enqueue_event_buf(struct vtscsi_softc *, 154 struct virtio_scsi_event *); 155 static int vtscsi_init_event_vq(struct vtscsi_softc *); 156 static void vtscsi_reinit_event_vq(struct vtscsi_softc *); 157 static void vtscsi_drain_event_vq(struct vtscsi_softc *); 158 159 static void vtscsi_complete_vqs_locked(struct vtscsi_softc *); 160 static void vtscsi_complete_vqs(struct vtscsi_softc *); 161 static void vtscsi_drain_vqs(struct vtscsi_softc *); 162 static void vtscsi_cancel_request(struct vtscsi_softc *, 163 struct vtscsi_request *); 164 static void vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *); 165 static void vtscsi_stop(struct vtscsi_softc *); 166 static int vtscsi_reset_bus(struct vtscsi_softc *); 167 168 static void vtscsi_init_request(struct vtscsi_softc *, 169 struct vtscsi_request *); 170 static int vtscsi_alloc_requests(struct vtscsi_softc *); 171 static void vtscsi_free_requests(struct vtscsi_softc *); 172 static void vtscsi_enqueue_request(struct vtscsi_softc *, 
173 struct vtscsi_request *); 174 static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *); 175 176 static void vtscsi_complete_request(struct vtscsi_request *); 177 static void vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *); 178 179 static void vtscsi_control_vq_intr(void *); 180 static void vtscsi_event_vq_intr(void *); 181 static void vtscsi_request_vq_intr(void *); 182 static void vtscsi_disable_vqs_intr(struct vtscsi_softc *); 183 static void vtscsi_enable_vqs_intr(struct vtscsi_softc *); 184 185 static void vtscsi_get_tunables(struct vtscsi_softc *); 186 static void vtscsi_add_sysctl(struct vtscsi_softc *); 187 188 static void vtscsi_printf_req(struct vtscsi_request *, const char *, 189 const char *, ...); 190 191 /* Global tunables. */ 192 /* 193 * The current QEMU VirtIO SCSI implementation does not cancel in-flight 194 * IO during virtio_stop(). So in-flight requests still complete after the 195 * device reset. We would have to wait for all the in-flight IO to complete, 196 * which defeats the typical purpose of a bus reset. We could simulate the 197 * bus reset with either I_T_NEXUS_RESET of all the targets, or with 198 * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the 199 * control virtqueue). But this isn't very useful if things really go off 200 * the rails, so default to disabled for now. 201 */ 202 static int vtscsi_bus_reset_disable = 1; 203 TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable); 204 205 static struct virtio_feature_desc vtscsi_feature_desc[] = { 206 { VIRTIO_SCSI_F_INOUT, "InOut" }, 207 { VIRTIO_SCSI_F_HOTPLUG, "Hotplug" }, 208 209 { 0, NULL } 210 }; 211 212 static device_method_t vtscsi_methods[] = { 213 /* Device methods. 
*/ 214 DEVMETHOD(device_probe, vtscsi_probe), 215 DEVMETHOD(device_attach, vtscsi_attach), 216 DEVMETHOD(device_detach, vtscsi_detach), 217 DEVMETHOD(device_suspend, vtscsi_suspend), 218 DEVMETHOD(device_resume, vtscsi_resume), 219 220 DEVMETHOD_END 221 }; 222 223 static driver_t vtscsi_driver = { 224 "vtscsi", 225 vtscsi_methods, 226 sizeof(struct vtscsi_softc) 227 }; 228 static devclass_t vtscsi_devclass; 229 230 DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass, 231 vtscsi_modevent, 0); 232 MODULE_VERSION(virtio_scsi, 1); 233 MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1); 234 MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1); 235 236 static int 237 vtscsi_modevent(module_t mod, int type, void *unused) 238 { 239 int error; 240 241 switch (type) { 242 case MOD_LOAD: 243 case MOD_QUIESCE: 244 case MOD_UNLOAD: 245 case MOD_SHUTDOWN: 246 error = 0; 247 break; 248 default: 249 error = EOPNOTSUPP; 250 break; 251 } 252 253 return (error); 254 } 255 256 static int 257 vtscsi_probe(device_t dev) 258 { 259 260 if (virtio_get_device_type(dev) != VIRTIO_ID_SCSI) 261 return (ENXIO); 262 263 device_set_desc(dev, "VirtIO SCSI Adapter"); 264 265 return (BUS_PROBE_DEFAULT); 266 } 267 268 static int 269 vtscsi_attach(device_t dev) 270 { 271 struct vtscsi_softc *sc; 272 struct virtio_scsi_config scsicfg; 273 int error; 274 275 sc = device_get_softc(dev); 276 sc->vtscsi_dev = dev; 277 278 VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev)); 279 TAILQ_INIT(&sc->vtscsi_req_free); 280 281 vtscsi_get_tunables(sc); 282 vtscsi_add_sysctl(sc); 283 284 virtio_set_feature_desc(dev, vtscsi_feature_desc); 285 vtscsi_negotiate_features(sc); 286 287 if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) 288 sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT; 289 if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT)) 290 sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL; 291 if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG)) 292 sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG; 293 294 vtscsi_read_config(sc, 
&scsicfg); 295 296 sc->vtscsi_max_channel = scsicfg.max_channel; 297 sc->vtscsi_max_target = scsicfg.max_target; 298 sc->vtscsi_max_lun = scsicfg.max_lun; 299 sc->vtscsi_event_buf_size = scsicfg.event_info_size; 300 301 vtscsi_write_device_config(sc); 302 303 sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max); 304 sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT); 305 if (sc->vtscsi_sglist == NULL) { 306 error = ENOMEM; 307 device_printf(dev, "cannot allocate sglist\n"); 308 goto fail; 309 } 310 311 error = vtscsi_alloc_virtqueues(sc); 312 if (error) { 313 device_printf(dev, "cannot allocate virtqueues\n"); 314 goto fail; 315 } 316 317 error = vtscsi_init_event_vq(sc); 318 if (error) { 319 device_printf(dev, "cannot populate the eventvq\n"); 320 goto fail; 321 } 322 323 error = vtscsi_alloc_requests(sc); 324 if (error) { 325 device_printf(dev, "cannot allocate requests\n"); 326 goto fail; 327 } 328 329 error = vtscsi_alloc_cam(sc); 330 if (error) { 331 device_printf(dev, "cannot allocate CAM structures\n"); 332 goto fail; 333 } 334 335 error = virtio_setup_intr(dev, INTR_TYPE_CAM); 336 if (error) { 337 device_printf(dev, "cannot setup virtqueue interrupts\n"); 338 goto fail; 339 } 340 341 vtscsi_enable_vqs_intr(sc); 342 343 /* 344 * Register with CAM after interrupts are enabled so we will get 345 * notified of the probe responses. 
346 */ 347 error = vtscsi_register_cam(sc); 348 if (error) { 349 device_printf(dev, "cannot register with CAM\n"); 350 goto fail; 351 } 352 353 fail: 354 if (error) 355 vtscsi_detach(dev); 356 357 return (error); 358 } 359 360 static int 361 vtscsi_detach(device_t dev) 362 { 363 struct vtscsi_softc *sc; 364 365 sc = device_get_softc(dev); 366 367 VTSCSI_LOCK(sc); 368 sc->vtscsi_flags |= VTSCSI_FLAG_DETACH; 369 if (device_is_attached(dev)) 370 vtscsi_stop(sc); 371 VTSCSI_UNLOCK(sc); 372 373 vtscsi_complete_vqs(sc); 374 vtscsi_drain_vqs(sc); 375 376 vtscsi_free_cam(sc); 377 vtscsi_free_requests(sc); 378 379 if (sc->vtscsi_sglist != NULL) { 380 sglist_free(sc->vtscsi_sglist); 381 sc->vtscsi_sglist = NULL; 382 } 383 384 VTSCSI_LOCK_DESTROY(sc); 385 386 return (0); 387 } 388 389 static int 390 vtscsi_suspend(device_t dev) 391 { 392 393 return (0); 394 } 395 396 static int 397 vtscsi_resume(device_t dev) 398 { 399 400 return (0); 401 } 402 403 static void 404 vtscsi_negotiate_features(struct vtscsi_softc *sc) 405 { 406 device_t dev; 407 uint64_t features; 408 409 dev = sc->vtscsi_dev; 410 features = virtio_negotiate_features(dev, VTSCSI_FEATURES); 411 sc->vtscsi_features = features; 412 } 413 414 #define VTSCSI_GET_CONFIG(_dev, _field, _cfg) \ 415 virtio_read_device_config(_dev, \ 416 offsetof(struct virtio_scsi_config, _field), \ 417 &(_cfg)->_field, sizeof((_cfg)->_field)) \ 418 419 static void 420 vtscsi_read_config(struct vtscsi_softc *sc, 421 struct virtio_scsi_config *scsicfg) 422 { 423 device_t dev; 424 425 dev = sc->vtscsi_dev; 426 427 bzero(scsicfg, sizeof(struct virtio_scsi_config)); 428 429 VTSCSI_GET_CONFIG(dev, num_queues, scsicfg); 430 VTSCSI_GET_CONFIG(dev, seg_max, scsicfg); 431 VTSCSI_GET_CONFIG(dev, max_sectors, scsicfg); 432 VTSCSI_GET_CONFIG(dev, cmd_per_lun, scsicfg); 433 VTSCSI_GET_CONFIG(dev, event_info_size, scsicfg); 434 VTSCSI_GET_CONFIG(dev, sense_size, scsicfg); 435 VTSCSI_GET_CONFIG(dev, cdb_size, scsicfg); 436 VTSCSI_GET_CONFIG(dev, 
max_channel, scsicfg); 437 VTSCSI_GET_CONFIG(dev, max_target, scsicfg); 438 VTSCSI_GET_CONFIG(dev, max_lun, scsicfg); 439 } 440 441 #undef VTSCSI_GET_CONFIG 442 443 static int 444 vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max) 445 { 446 int nsegs; 447 448 nsegs = VTSCSI_MIN_SEGMENTS; 449 450 if (seg_max > 0) { 451 nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1); 452 if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) 453 nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT); 454 } else 455 nsegs += 1; 456 457 return (nsegs); 458 } 459 460 static int 461 vtscsi_alloc_virtqueues(struct vtscsi_softc *sc) 462 { 463 device_t dev; 464 struct vq_alloc_info vq_info[3]; 465 int nvqs; 466 467 dev = sc->vtscsi_dev; 468 nvqs = 3; 469 470 VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc, 471 &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev)); 472 473 VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc, 474 &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev)); 475 476 VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs, 477 vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq, 478 "%s request", device_get_nameunit(dev)); 479 480 return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info)); 481 } 482 483 static void 484 vtscsi_write_device_config(struct vtscsi_softc *sc) 485 { 486 487 virtio_write_dev_config_4(sc->vtscsi_dev, 488 offsetof(struct virtio_scsi_config, sense_size), 489 VIRTIO_SCSI_SENSE_SIZE); 490 491 /* 492 * This is the size in the virtio_scsi_cmd_req structure. Note 493 * this value (32) is larger than the maximum CAM CDB size (16). 
494 */ 495 virtio_write_dev_config_4(sc->vtscsi_dev, 496 offsetof(struct virtio_scsi_config, cdb_size), 497 VIRTIO_SCSI_CDB_SIZE); 498 } 499 500 static int 501 vtscsi_reinit(struct vtscsi_softc *sc) 502 { 503 device_t dev; 504 int error; 505 506 dev = sc->vtscsi_dev; 507 508 error = virtio_reinit(dev, sc->vtscsi_features); 509 if (error == 0) { 510 vtscsi_write_device_config(sc); 511 vtscsi_reinit_event_vq(sc); 512 virtio_reinit_complete(dev); 513 514 vtscsi_enable_vqs_intr(sc); 515 } 516 517 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error); 518 519 return (error); 520 } 521 522 static int 523 vtscsi_alloc_cam(struct vtscsi_softc *sc) 524 { 525 device_t dev; 526 struct cam_devq *devq; 527 int openings; 528 529 dev = sc->vtscsi_dev; 530 openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS; 531 532 devq = cam_simq_alloc(openings); 533 if (devq == NULL) { 534 device_printf(dev, "cannot allocate SIM queue\n"); 535 return (ENOMEM); 536 } 537 538 sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll, 539 "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1, 540 openings, devq); 541 if (sc->vtscsi_sim == NULL) { 542 cam_simq_free(devq); 543 device_printf(dev, "cannot allocate SIM\n"); 544 return (ENOMEM); 545 } 546 547 return (0); 548 } 549 550 static int 551 vtscsi_register_cam(struct vtscsi_softc *sc) 552 { 553 device_t dev; 554 int registered, error; 555 556 dev = sc->vtscsi_dev; 557 registered = 0; 558 559 VTSCSI_LOCK(sc); 560 561 if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) { 562 error = ENOMEM; 563 device_printf(dev, "cannot register XPT bus\n"); 564 goto fail; 565 } 566 567 registered = 1; 568 569 if (xpt_create_path(&sc->vtscsi_path, NULL, 570 cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD, 571 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 572 error = ENOMEM; 573 device_printf(dev, "cannot create bus path\n"); 574 goto fail; 575 } 576 577 if (vtscsi_register_async(sc) != CAM_REQ_CMP) { 578 error = EIO; 579 device_printf(dev, 
"cannot register async callback\n"); 580 goto fail; 581 } 582 583 VTSCSI_UNLOCK(sc); 584 585 return (0); 586 587 fail: 588 if (sc->vtscsi_path != NULL) { 589 xpt_free_path(sc->vtscsi_path); 590 sc->vtscsi_path = NULL; 591 } 592 593 if (registered != 0) 594 xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim)); 595 596 VTSCSI_UNLOCK(sc); 597 598 return (error); 599 } 600 601 static void 602 vtscsi_free_cam(struct vtscsi_softc *sc) 603 { 604 605 VTSCSI_LOCK(sc); 606 607 if (sc->vtscsi_path != NULL) { 608 vtscsi_deregister_async(sc); 609 610 xpt_free_path(sc->vtscsi_path); 611 sc->vtscsi_path = NULL; 612 613 xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim)); 614 } 615 616 if (sc->vtscsi_sim != NULL) { 617 cam_sim_free(sc->vtscsi_sim, 1); 618 sc->vtscsi_sim = NULL; 619 } 620 621 VTSCSI_UNLOCK(sc); 622 } 623 624 static void 625 vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg) 626 { 627 struct cam_sim *sim; 628 struct vtscsi_softc *sc; 629 630 sim = cb_arg; 631 sc = cam_sim_softc(sim); 632 633 vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code); 634 635 /* 636 * TODO Once QEMU supports event reporting, we should 637 * (un)subscribe to events here. 
638 */ 639 switch (code) { 640 case AC_FOUND_DEVICE: 641 break; 642 case AC_LOST_DEVICE: 643 break; 644 } 645 } 646 647 static int 648 vtscsi_register_async(struct vtscsi_softc *sc) 649 { 650 struct ccb_setasync csa; 651 652 xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5); 653 csa.ccb_h.func_code = XPT_SASYNC_CB; 654 csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE; 655 csa.callback = vtscsi_cam_async; 656 csa.callback_arg = sc->vtscsi_sim; 657 658 xpt_action((union ccb *) &csa); 659 660 return (csa.ccb_h.status); 661 } 662 663 static void 664 vtscsi_deregister_async(struct vtscsi_softc *sc) 665 { 666 struct ccb_setasync csa; 667 668 xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5); 669 csa.ccb_h.func_code = XPT_SASYNC_CB; 670 csa.event_enable = 0; 671 csa.callback = vtscsi_cam_async; 672 csa.callback_arg = sc->vtscsi_sim; 673 674 xpt_action((union ccb *) &csa); 675 } 676 677 static void 678 vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb) 679 { 680 struct vtscsi_softc *sc; 681 struct ccb_hdr *ccbh; 682 683 sc = cam_sim_softc(sim); 684 ccbh = &ccb->ccb_h; 685 686 VTSCSI_LOCK_OWNED(sc); 687 688 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) { 689 /* 690 * The VTSCSI_MTX is briefly dropped between setting 691 * VTSCSI_FLAG_DETACH and deregistering with CAM, so 692 * drop any CCBs that come in during that window. 
693 */ 694 ccbh->status = CAM_NO_HBA; 695 xpt_done(ccb); 696 return; 697 } 698 699 switch (ccbh->func_code) { 700 case XPT_SCSI_IO: 701 vtscsi_cam_scsi_io(sc, sim, ccb); 702 break; 703 704 case XPT_SET_TRAN_SETTINGS: 705 ccbh->status = CAM_FUNC_NOTAVAIL; 706 xpt_done(ccb); 707 break; 708 709 case XPT_GET_TRAN_SETTINGS: 710 vtscsi_cam_get_tran_settings(sc, ccb); 711 break; 712 713 case XPT_RESET_BUS: 714 vtscsi_cam_reset_bus(sc, ccb); 715 break; 716 717 case XPT_RESET_DEV: 718 vtscsi_cam_reset_dev(sc, ccb); 719 break; 720 721 case XPT_ABORT: 722 vtscsi_cam_abort(sc, ccb); 723 break; 724 725 case XPT_CALC_GEOMETRY: 726 cam_calc_geometry(&ccb->ccg, 1); 727 xpt_done(ccb); 728 break; 729 730 case XPT_PATH_INQ: 731 vtscsi_cam_path_inquiry(sc, sim, ccb); 732 break; 733 734 default: 735 vtscsi_dprintf(sc, VTSCSI_ERROR, 736 "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code); 737 738 ccbh->status = CAM_REQ_INVALID; 739 xpt_done(ccb); 740 break; 741 } 742 } 743 744 static void 745 vtscsi_cam_poll(struct cam_sim *sim) 746 { 747 struct vtscsi_softc *sc; 748 749 sc = cam_sim_softc(sim); 750 751 vtscsi_complete_vqs_locked(sc); 752 } 753 754 static void 755 vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim, 756 union ccb *ccb) 757 { 758 struct ccb_hdr *ccbh; 759 struct ccb_scsiio *csio; 760 int error; 761 762 ccbh = &ccb->ccb_h; 763 csio = &ccb->csio; 764 765 if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) { 766 error = EINVAL; 767 ccbh->status = CAM_REQ_INVALID; 768 goto done; 769 } 770 771 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH && 772 (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) { 773 error = EINVAL; 774 ccbh->status = CAM_REQ_INVALID; 775 goto done; 776 } 777 778 error = vtscsi_start_scsi_cmd(sc, ccb); 779 780 done: 781 if (error) { 782 vtscsi_dprintf(sc, VTSCSI_ERROR, 783 "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status); 784 xpt_done(ccb); 785 } 786 } 787 788 static void 789 vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb 
*ccb) 790 { 791 struct ccb_trans_settings *cts; 792 struct ccb_trans_settings_scsi *scsi; 793 794 cts = &ccb->cts; 795 scsi = &cts->proto_specific.scsi; 796 797 cts->protocol = PROTO_SCSI; 798 cts->protocol_version = SCSI_REV_SPC3; 799 cts->transport = XPORT_SAS; 800 cts->transport_version = 0; 801 802 scsi->valid = CTS_SCSI_VALID_TQ; 803 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 804 805 ccb->ccb_h.status = CAM_REQ_CMP; 806 xpt_done(ccb); 807 } 808 809 static void 810 vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb) 811 { 812 int error; 813 814 error = vtscsi_reset_bus(sc); 815 if (error == 0) 816 ccb->ccb_h.status = CAM_REQ_CMP; 817 else 818 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 819 820 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n", 821 error, ccb, ccb->ccb_h.status); 822 823 xpt_done(ccb); 824 } 825 826 static void 827 vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb) 828 { 829 struct ccb_hdr *ccbh; 830 struct vtscsi_request *req; 831 int error; 832 833 ccbh = &ccb->ccb_h; 834 835 req = vtscsi_dequeue_request(sc); 836 if (req == NULL) { 837 error = EAGAIN; 838 vtscsi_freeze_simq(sc, VTSCSI_REQUEST); 839 goto fail; 840 } 841 842 req->vsr_ccb = ccb; 843 844 error = vtscsi_execute_reset_dev_cmd(sc, req); 845 if (error == 0) 846 return; 847 848 vtscsi_enqueue_request(sc, req); 849 850 fail: 851 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n", 852 error, req, ccb); 853 854 if (error == EAGAIN) 855 ccbh->status = CAM_RESRC_UNAVAIL; 856 else 857 ccbh->status = CAM_REQ_CMP_ERR; 858 859 xpt_done(ccb); 860 } 861 862 static void 863 vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb) 864 { 865 struct vtscsi_request *req; 866 struct ccb_hdr *ccbh; 867 int error; 868 869 ccbh = &ccb->ccb_h; 870 871 req = vtscsi_dequeue_request(sc); 872 if (req == NULL) { 873 error = EAGAIN; 874 vtscsi_freeze_simq(sc, VTSCSI_REQUEST); 875 goto fail; 876 } 877 878 req->vsr_ccb = ccb; 879 880 error = vtscsi_execute_abort_task_cmd(sc, req); 
881 if (error == 0) 882 return; 883 884 vtscsi_enqueue_request(sc, req); 885 886 fail: 887 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n", 888 error, req, ccb); 889 890 if (error == EAGAIN) 891 ccbh->status = CAM_RESRC_UNAVAIL; 892 else 893 ccbh->status = CAM_REQ_CMP_ERR; 894 895 xpt_done(ccb); 896 } 897 898 static void 899 vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim, 900 union ccb *ccb) 901 { 902 device_t dev; 903 struct ccb_pathinq *cpi; 904 905 dev = sc->vtscsi_dev; 906 cpi = &ccb->cpi; 907 908 vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb); 909 910 cpi->version_num = 1; 911 cpi->hba_inquiry = PI_TAG_ABLE; 912 cpi->target_sprt = 0; 913 cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED; 914 if (vtscsi_bus_reset_disable != 0) 915 cpi->hba_misc |= PIM_NOBUSRESET; 916 cpi->hba_eng_cnt = 0; 917 918 cpi->max_target = sc->vtscsi_max_target; 919 cpi->max_lun = sc->vtscsi_max_lun; 920 cpi->initiator_id = VTSCSI_INITIATOR_ID; 921 922 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 923 strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN); 924 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 925 926 cpi->unit_number = cam_sim_unit(sim); 927 cpi->bus_id = cam_sim_bus(sim); 928 929 cpi->base_transfer_speed = 300000; 930 931 cpi->protocol = PROTO_SCSI; 932 cpi->protocol_version = SCSI_REV_SPC3; 933 cpi->transport = XPORT_SAS; 934 cpi->transport_version = 0; 935 936 cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) * 937 PAGE_SIZE; 938 939 cpi->hba_vendor = virtio_get_vendor(dev); 940 cpi->hba_device = virtio_get_device(dev); 941 cpi->hba_subvendor = virtio_get_subvendor(dev); 942 cpi->hba_subdevice = virtio_get_subdevice(dev); 943 944 ccb->ccb_h.status = CAM_REQ_CMP; 945 xpt_done(ccb); 946 } 947 948 static int 949 vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg, 950 struct ccb_scsiio *csio) 951 { 952 struct ccb_hdr *ccbh; 953 struct bus_dma_segment *dseg; 954 int i, error; 955 956 ccbh = &csio->ccb_h; 957 
error = 0; 958 959 switch ((ccbh->flags & CAM_DATA_MASK)) { 960 case CAM_DATA_VADDR: 961 error = sglist_append(sg, csio->data_ptr, csio->dxfer_len); 962 break; 963 case CAM_DATA_PADDR: 964 error = sglist_append_phys(sg, 965 (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len); 966 break; 967 case CAM_DATA_SG: 968 for (i = 0; i < csio->sglist_cnt && error == 0; i++) { 969 dseg = &((struct bus_dma_segment *)csio->data_ptr)[i]; 970 error = sglist_append(sg, 971 (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len); 972 } 973 break; 974 case CAM_DATA_SG_PADDR: 975 for (i = 0; i < csio->sglist_cnt && error == 0; i++) { 976 dseg = &((struct bus_dma_segment *)csio->data_ptr)[i]; 977 error = sglist_append_phys(sg, 978 (vm_paddr_t) dseg->ds_addr, dseg->ds_len); 979 } 980 break; 981 case CAM_DATA_BIO: 982 error = sglist_append_bio(sg, (struct bio *) csio->data_ptr); 983 break; 984 default: 985 error = EINVAL; 986 break; 987 } 988 989 return (error); 990 } 991 992 static int 993 vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req, 994 int *readable, int *writable) 995 { 996 struct sglist *sg; 997 struct ccb_hdr *ccbh; 998 struct ccb_scsiio *csio; 999 struct virtio_scsi_cmd_req *cmd_req; 1000 struct virtio_scsi_cmd_resp *cmd_resp; 1001 int error; 1002 1003 sg = sc->vtscsi_sglist; 1004 csio = &req->vsr_ccb->csio; 1005 ccbh = &csio->ccb_h; 1006 cmd_req = &req->vsr_cmd_req; 1007 cmd_resp = &req->vsr_cmd_resp; 1008 1009 sglist_reset(sg); 1010 1011 sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req)); 1012 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1013 error = vtscsi_sg_append_scsi_buf(sc, sg, csio); 1014 /* At least one segment must be left for the response. 
*/ 1015 if (error || sg->sg_nseg == sg->sg_maxseg) 1016 goto fail; 1017 } 1018 1019 *readable = sg->sg_nseg; 1020 1021 sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp)); 1022 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1023 error = vtscsi_sg_append_scsi_buf(sc, sg, csio); 1024 if (error) 1025 goto fail; 1026 } 1027 1028 *writable = sg->sg_nseg - *readable; 1029 1030 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d " 1031 "writable=%d\n", req, ccbh, *readable, *writable); 1032 1033 return (0); 1034 1035 fail: 1036 /* 1037 * This should never happen unless maxio was incorrectly set. 1038 */ 1039 vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0); 1040 1041 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p " 1042 "nseg=%d maxseg=%d\n", 1043 error, req, ccbh, sg->sg_nseg, sg->sg_maxseg); 1044 1045 return (EFBIG); 1046 } 1047 1048 static int 1049 vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) 1050 { 1051 struct sglist *sg; 1052 struct virtqueue *vq; 1053 struct ccb_scsiio *csio; 1054 struct ccb_hdr *ccbh; 1055 struct virtio_scsi_cmd_req *cmd_req; 1056 struct virtio_scsi_cmd_resp *cmd_resp; 1057 int readable, writable, error; 1058 1059 sg = sc->vtscsi_sglist; 1060 vq = sc->vtscsi_request_vq; 1061 csio = &req->vsr_ccb->csio; 1062 ccbh = &csio->ccb_h; 1063 cmd_req = &req->vsr_cmd_req; 1064 cmd_resp = &req->vsr_cmd_resp; 1065 1066 vtscsi_init_scsi_cmd_req(csio, cmd_req); 1067 1068 error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable); 1069 if (error) 1070 return (error); 1071 1072 req->vsr_complete = vtscsi_complete_scsi_cmd; 1073 cmd_resp->response = -1; 1074 1075 error = virtqueue_enqueue(vq, req, sg, readable, writable); 1076 if (error) { 1077 vtscsi_dprintf(sc, VTSCSI_ERROR, 1078 "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh); 1079 1080 ccbh->status = CAM_REQUEUE_REQ; 1081 vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ); 1082 return (error); 1083 } 1084 1085 ccbh->status |= CAM_SIM_QUEUED; 
1086 ccbh->ccbh_vtscsi_req = req; 1087 1088 virtqueue_notify(vq); 1089 1090 if (ccbh->timeout != CAM_TIME_INFINITY) { 1091 req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET; 1092 callout_reset_sbt(&req->vsr_callout, SBT_1MS * ccbh->timeout, 1093 0, vtscsi_timedout_scsi_cmd, req, 0); 1094 } 1095 1096 vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n", 1097 req, ccbh); 1098 1099 return (0); 1100 } 1101 1102 static int 1103 vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb) 1104 { 1105 struct vtscsi_request *req; 1106 int error; 1107 1108 req = vtscsi_dequeue_request(sc); 1109 if (req == NULL) { 1110 ccb->ccb_h.status = CAM_REQUEUE_REQ; 1111 vtscsi_freeze_simq(sc, VTSCSI_REQUEST); 1112 return (ENOBUFS); 1113 } 1114 1115 req->vsr_ccb = ccb; 1116 1117 error = vtscsi_execute_scsi_cmd(sc, req); 1118 if (error) 1119 vtscsi_enqueue_request(sc, req); 1120 1121 return (error); 1122 } 1123 1124 static void 1125 vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc, 1126 struct vtscsi_request *req) 1127 { 1128 struct virtio_scsi_ctrl_tmf_resp *tmf_resp; 1129 struct vtscsi_request *to_req; 1130 uint8_t response; 1131 1132 tmf_resp = &req->vsr_tmf_resp; 1133 response = tmf_resp->response; 1134 to_req = req->vsr_timedout_req; 1135 1136 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n", 1137 req, to_req, response); 1138 1139 vtscsi_enqueue_request(sc, req); 1140 1141 /* 1142 * The timedout request could have completed between when the 1143 * abort task was sent and when the host processed it. 1144 */ 1145 if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT) 1146 return; 1147 1148 /* The timedout request was successfully aborted. */ 1149 if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) 1150 return; 1151 1152 /* Don't bother if the device is going away. */ 1153 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) 1154 return; 1155 1156 /* The timedout request will be aborted by the reset. 
*/ 1157 if (sc->vtscsi_flags & VTSCSI_FLAG_RESET) 1158 return; 1159 1160 vtscsi_reset_bus(sc); 1161 } 1162 1163 static int 1164 vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc, 1165 struct vtscsi_request *to_req) 1166 { 1167 struct sglist *sg; 1168 struct ccb_hdr *to_ccbh; 1169 struct vtscsi_request *req; 1170 struct virtio_scsi_ctrl_tmf_req *tmf_req; 1171 struct virtio_scsi_ctrl_tmf_resp *tmf_resp; 1172 int error; 1173 1174 sg = sc->vtscsi_sglist; 1175 to_ccbh = &to_req->vsr_ccb->ccb_h; 1176 1177 req = vtscsi_dequeue_request(sc); 1178 if (req == NULL) { 1179 error = ENOBUFS; 1180 goto fail; 1181 } 1182 1183 tmf_req = &req->vsr_tmf_req; 1184 tmf_resp = &req->vsr_tmf_resp; 1185 1186 vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK, 1187 (uintptr_t) to_ccbh, tmf_req); 1188 1189 sglist_reset(sg); 1190 sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req)); 1191 sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); 1192 1193 req->vsr_timedout_req = to_req; 1194 req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd; 1195 tmf_resp->response = -1; 1196 1197 error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1, 1198 VTSCSI_EXECUTE_ASYNC); 1199 if (error == 0) 1200 return (0); 1201 1202 vtscsi_enqueue_request(sc, req); 1203 1204 fail: 1205 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p " 1206 "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh); 1207 1208 return (error); 1209 } 1210 1211 static void 1212 vtscsi_timedout_scsi_cmd(void *xreq) 1213 { 1214 struct vtscsi_softc *sc; 1215 struct vtscsi_request *to_req; 1216 1217 to_req = xreq; 1218 sc = to_req->vsr_softc; 1219 1220 vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n", 1221 to_req, to_req->vsr_ccb, to_req->vsr_state); 1222 1223 /* Don't bother if the device is going away. */ 1224 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) 1225 return; 1226 1227 /* 1228 * Bail if the request is not in use. 
We likely raced when 1229 * stopping the callout handler or it has already been aborted. 1230 */ 1231 if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE || 1232 (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0) 1233 return; 1234 1235 /* 1236 * Complete the request queue in case the timedout request is 1237 * actually just pending. 1238 */ 1239 vtscsi_complete_vq(sc, sc->vtscsi_request_vq); 1240 if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE) 1241 return; 1242 1243 sc->vtscsi_stats.scsi_cmd_timeouts++; 1244 to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT; 1245 1246 if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0) 1247 return; 1248 1249 vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n"); 1250 vtscsi_reset_bus(sc); 1251 } 1252 1253 static cam_status 1254 vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp) 1255 { 1256 cam_status status; 1257 1258 switch (cmd_resp->response) { 1259 case VIRTIO_SCSI_S_OK: 1260 status = CAM_REQ_CMP; 1261 break; 1262 case VIRTIO_SCSI_S_OVERRUN: 1263 status = CAM_DATA_RUN_ERR; 1264 break; 1265 case VIRTIO_SCSI_S_ABORTED: 1266 status = CAM_REQ_ABORTED; 1267 break; 1268 case VIRTIO_SCSI_S_BAD_TARGET: 1269 status = CAM_SEL_TIMEOUT; 1270 break; 1271 case VIRTIO_SCSI_S_RESET: 1272 status = CAM_SCSI_BUS_RESET; 1273 break; 1274 case VIRTIO_SCSI_S_BUSY: 1275 status = CAM_SCSI_BUSY; 1276 break; 1277 case VIRTIO_SCSI_S_TRANSPORT_FAILURE: 1278 case VIRTIO_SCSI_S_TARGET_FAILURE: 1279 case VIRTIO_SCSI_S_NEXUS_FAILURE: 1280 status = CAM_SCSI_IT_NEXUS_LOST; 1281 break; 1282 default: /* VIRTIO_SCSI_S_FAILURE */ 1283 status = CAM_REQ_CMP_ERR; 1284 break; 1285 } 1286 1287 return (status); 1288 } 1289 1290 static cam_status 1291 vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc, 1292 struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp) 1293 { 1294 cam_status status; 1295 1296 csio->scsi_status = cmd_resp->status; 1297 csio->resid = cmd_resp->resid; 1298 1299 if (csio->scsi_status == SCSI_STATUS_OK) 1300 status = 
CAM_REQ_CMP; 1301 else 1302 status = CAM_SCSI_STATUS_ERROR; 1303 1304 if (cmd_resp->sense_len > 0) { 1305 status |= CAM_AUTOSNS_VALID; 1306 1307 if (cmd_resp->sense_len < csio->sense_len) 1308 csio->sense_resid = csio->sense_len - 1309 cmd_resp->sense_len; 1310 else 1311 csio->sense_resid = 0; 1312 1313 bzero(&csio->sense_data, sizeof(csio->sense_data)); 1314 memcpy(cmd_resp->sense, &csio->sense_data, 1315 csio->sense_len - csio->sense_resid); 1316 } 1317 1318 vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR, 1319 "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n", 1320 csio, csio->scsi_status, csio->resid, csio->sense_resid); 1321 1322 return (status); 1323 } 1324 1325 static void 1326 vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) 1327 { 1328 struct ccb_hdr *ccbh; 1329 struct ccb_scsiio *csio; 1330 struct virtio_scsi_cmd_resp *cmd_resp; 1331 cam_status status; 1332 1333 csio = &req->vsr_ccb->csio; 1334 ccbh = &csio->ccb_h; 1335 cmd_resp = &req->vsr_cmd_resp; 1336 1337 KASSERT(ccbh->ccbh_vtscsi_req == req, 1338 ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req)); 1339 1340 if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) 1341 callout_stop(&req->vsr_callout); 1342 1343 status = vtscsi_scsi_cmd_cam_status(cmd_resp); 1344 if (status == CAM_REQ_ABORTED) { 1345 if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT) 1346 status = CAM_CMD_TIMEOUT; 1347 } else if (status == CAM_REQ_CMP) 1348 status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp); 1349 1350 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1351 status |= CAM_DEV_QFRZN; 1352 xpt_freeze_devq(ccbh->path, 1); 1353 } 1354 1355 if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0) 1356 status |= CAM_RELEASE_SIMQ; 1357 1358 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n", 1359 req, ccbh, status); 1360 1361 ccbh->status = status; 1362 xpt_done(req->vsr_ccb); 1363 vtscsi_enqueue_request(sc, req); 1364 } 1365 1366 static 
void
vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	/* XXX We probably shouldn't poll forever. */
	req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
	do
		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
	while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);

	req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
}

/*
 * Submit a control (TMF) request to the control virtqueue, either
 * polling until it completes (VTSCSI_EXECUTE_POLL) or returning
 * immediately and letting the completion callback run asynchronously.
 */
static int
vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
    struct sglist *sg, int readable, int writable, int flag)
{
	struct virtqueue *vq;
	int error;

	vq = sc->vtscsi_control_vq;

	MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		/*
		 * Return EAGAIN when the virtqueue does not have enough
		 * descriptors available.
		 */
		if (error == ENOSPC || error == EMSGSIZE)
			error = EAGAIN;

		return (error);
	}

	virtqueue_notify(vq);
	if (flag == VTSCSI_EXECUTE_POLL)
		vtscsi_poll_ctrl_req(sc, req);

	return (0);
}

/*
 * Completion handler for an ABORT_TASK TMF issued for an abort CCB:
 * map the TMF response onto the CCB's status and complete it.
 */
static void
vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	union ccb *ccb;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;

	ccb = req->vsr_ccb;
	ccbh = &ccb->ccb_h;
	tmf_resp = &req->vsr_tmf_resp;

	switch (tmf_resp->response) {
	case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
		ccbh->status = CAM_REQ_CMP;
		break;
	case VIRTIO_SCSI_S_FUNCTION_REJECTED:
		ccbh->status = CAM_UA_ABORT;
		break;
	default:
		ccbh->status = CAM_REQ_CMP_ERR;
		break;
	}

	xpt_done(ccb);
	vtscsi_enqueue_request(sc, req);
}

/*
 * Issue an ABORT_TASK TMF for the CCB named in an abort CCB. Fails
 * with EINVAL for CCBs that are not SCSI I/O (or have no associated
 * request) and EALREADY when the target request is no longer in use.
 */
static int
vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_abort *cab;
	struct ccb_hdr *ccbh;
	struct ccb_hdr *abort_ccbh;
	struct vtscsi_request *abort_req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	cab = &req->vsr_ccb->cab;
	ccbh = &cab->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* CCB header and request that's to be aborted. */
	abort_ccbh = &cab->abort_ccb->ccb_h;
	abort_req = abort_ccbh->ccbh_vtscsi_req;

	if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
		error = EINVAL;
		goto fail;
	}

	/* Only attempt to abort requests that could be in-flight. */
	if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
		error = EALREADY;
		goto fail;
	}

	abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
	if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&abort_req->vsr_callout);

	vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) abort_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_abort_task_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

fail:
	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
	    "abort_req=%p\n", error, req, abort_ccbh, abort_req);

	return (error);
}

/*
 * Completion handler for a device reset TMF: report the result to CAM
 * and announce the reset (AC_SENT_BDR) when it succeeded.
 */
static void
vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	union ccb *ccb;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;

	ccb = req->vsr_ccb;
	ccbh = &ccb->ccb_h;
	tmf_resp = &req->vsr_tmf_resp;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
	    req, ccb, tmf_resp->response);

	if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
		ccbh->status = CAM_REQ_CMP;
		vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
		    ccbh->target_lun);
	} else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
	vtscsi_enqueue_request(sc, req);
}

/*
 * Issue a reset TMF for a reset-device CCB: an I_T nexus reset when
 * the LUN is the wildcard, otherwise a logical unit reset.
 */
static int
vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_resetdev *crd;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	uint32_t subtype;
	int error;

	sg = sc->vtscsi_sglist;
	crd = &req->vsr_ccb->crd;
	ccbh = &crd->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	if (ccbh->target_lun == CAM_LUN_WILDCARD)
		subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
	else
		subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;

	vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_reset_dev_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
	    error, req, ccbh);

	return (error);
}

/*
 * Decode a VirtIO SCSI 8-byte LUN field into a CAM target and LUN id.
 * Inverse of vtscsi_set_request_lun().
 */
static void
vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
{

	*target_id = lun[1];
	*lun_id = (lun[2] << 8) | lun[3];
}

/*
 * Encode the CCB's target and LUN into the VirtIO SCSI 8-byte LUN
 * format: byte 0 is 1, byte 1 the target id, bytes 2-3 the LUN with
 * 0x40 set in the high byte.
 */
static void
vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
{

	lun[0] = 1;
	lun[1] = ccbh->target_id;
	lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
	lun[3] = ccbh->target_lun & 0xFF;
}

/*
 * Fill in a VirtIO SCSI command request header from a SCSI I/O CCB:
 * LUN, tag (the CCB pointer), task attribute, and the CDB bytes.
 */
static void
vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio,
    struct virtio_scsi_cmd_req *cmd_req)
{
	uint8_t attr;

	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		attr = VIRTIO_SCSI_S_HEAD;
		break;
	case MSG_ORDERED_Q_TAG:
		attr = VIRTIO_SCSI_S_ORDERED;
		break;
	case MSG_ACA_TASK:
		attr = VIRTIO_SCSI_S_ACA;
		break;
	default: /* MSG_SIMPLE_Q_TAG */
		attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}

	vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
	cmd_req->tag = (uintptr_t) csio;
	cmd_req->task_attr = attr;

	/* The CDB may be stored inline in the CCB or behind a pointer. */
	memcpy(cmd_req->cdb,
	    csio->ccb_h.flags & CAM_CDB_POINTER ?
	    csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
	    csio->cdb_len);
}

/*
 * Fill in a VirtIO SCSI task management request.
 */
static void
vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype,
    uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
{

	vtscsi_set_request_lun(ccbh, tmf_req->lun);

	tmf_req->type = VIRTIO_SCSI_T_TMF;
	tmf_req->subtype = subtype;
	tmf_req->tag = tag;
}

/*
 * Record the reason(s) the SIMQ should be frozen, only calling into
 * CAM on the transition from unfrozen to frozen.
 */
static void
vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
{
	int frozen;

	frozen = sc->vtscsi_frozen;

	if (reason & VTSCSI_REQUEST &&
	    (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
		sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;

	if (reason & VTSCSI_REQUEST_VQ &&
	    (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
		sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;

	/* Freeze the SIMQ if transitioned to frozen.
	 */
	if (frozen == 0 && sc->vtscsi_frozen != 0) {
		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
		xpt_freeze_simq(sc->vtscsi_sim, 1);
	}
}

/*
 * Clear the given freeze reason(s); returns non-zero when the last
 * reason was cleared so the caller can release the SIMQ.
 */
static int
vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
{
	int thawed;

	if (sc->vtscsi_frozen == 0 || reason == 0)
		return (0);

	if (reason & VTSCSI_REQUEST &&
	    sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;

	if (reason & VTSCSI_REQUEST_VQ &&
	    sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;

	thawed = sc->vtscsi_frozen == 0;
	if (thawed != 0)
		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");

	return (thawed);
}

/*
 * Post a CAM async event for the given target/LUN, creating a
 * temporary path unless the softc's wildcard path can be used.
 */
static void
vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
    target_id_t target_id, lun_id_t lun_id)
{
	struct cam_path *path;

	/* Use the wildcard path from our softc for bus announcements. */
	if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
		xpt_async(ac_code, sc->vtscsi_path, NULL);
		return;
	}

	if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
	    target_id, lun_id) != CAM_REQ_CMP) {
		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
		return;
	}

	xpt_async(ac_code, path, NULL);
	xpt_free_path(path);
}

/*
 * Kick off a CAM rescan of the given target/LUN.
 */
static void
vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
    lun_id_t lun_id)
{
	union ccb *ccb;
	cam_status status;

	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
		return;
	}

	status = xpt_create_path(&ccb->ccb_h.path, NULL,
	    cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
	if (status != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		return;
	}

	xpt_rescan(ccb);
}

static void
vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
{

	/* Rescan every target and LUN on the bus. */
	vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
}

/*
 * Handle a transport reset event from the device by rescanning the
 * affected target/LUN.
 */
static void
vtscsi_transport_reset_event(struct vtscsi_softc *sc,
    struct virtio_scsi_event *event)
{
	target_id_t target_id;
	lun_id_t lun_id;

	vtscsi_get_request_lun(event->lun, &target_id, &lun_id);

	switch (event->reason) {
	case VIRTIO_SCSI_EVT_RESET_RESCAN:
	case VIRTIO_SCSI_EVT_RESET_REMOVED:
		vtscsi_execute_rescan(sc, target_id, lun_id);
		break;
	default:
		device_printf(sc->vtscsi_dev,
		    "unhandled transport event reason: %d\n", event->reason);
		break;
	}
}

/*
 * Dispatch a dequeued event buffer and then repost it on the event
 * virtqueue. A missed-events notification triggers a full bus rescan.
 */
static void
vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
{
	int error;

	if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
		switch (event->event) {
		case VIRTIO_SCSI_T_TRANSPORT_RESET:
			vtscsi_transport_reset_event(sc,
			    event);
			break;
		default:
			device_printf(sc->vtscsi_dev,
			    "unhandled event: %d\n", event->event);
			break;
		}
	} else
		vtscsi_execute_rescan_bus(sc);

	/*
	 * This should always be successful since the buffer
	 * was just dequeued.
	 */
	error = vtscsi_enqueue_event_buf(sc, event);
	KASSERT(error == 0,
	    ("cannot requeue event buffer: %d", error));
}

/*
 * Post an event buffer on the event virtqueue for the device to fill.
 */
static int
vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
    struct virtio_scsi_event *event)
{
	struct sglist *sg;
	struct virtqueue *vq;
	int size, error;

	sg = sc->vtscsi_sglist;
	vq = sc->vtscsi_event_vq;
	size = sc->vtscsi_event_buf_size;

	bzero(event, size);

	sglist_reset(sg);
	error = sglist_append(sg, event, size);
	if (error)
		return (error);

	/* The buffer is entirely device-writable (no readable segments). */
	error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
	if (error)
		return (error);

	virtqueue_notify(vq);

	return (0);
}

/*
 * Populate the event virtqueue with buffers; skipped entirely for
 * hosts without hotplug support (see comment below).
 */
static int
vtscsi_init_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, size, error;

	/*
	 * The first release of QEMU with VirtIO SCSI support would crash
	 * when attempting to notify the event virtqueue. This was fixed
	 * when hotplug support was added.
	 */
	if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
		size = sc->vtscsi_event_buf_size;
	else
		size = 0;

	if (size < sizeof(struct virtio_scsi_event))
		return (0);

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/*
	 * Even just one buffer is enough. Missed events are
	 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
	 */
	if (i > 0)
		error = 0;

	return (error);
}

/*
 * Repost the event buffers after the device has been reinitialized.
 */
static void
vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, error;

	if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
	    sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
		return;

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	KASSERT(i > 0, ("cannot reinit event vq: %d", error));
}

/*
 * Remove all buffers from the event virtqueue.
 */
static void
vtscsi_drain_event_vq(struct vtscsi_softc *sc)
{
	struct virtqueue *vq;
	int last;

	vq = sc->vtscsi_event_vq;
	last = 0;

	while (virtqueue_drain(vq, &last) != NULL)
		;

	KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
}

/*
 * Run completions on the request and control virtqueues; the softc
 * mutex must already be held.
 */
static void
vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_request_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_control_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
}

static void
vtscsi_complete_vqs(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);
	vtscsi_complete_vqs_locked(sc);
	VTSCSI_UNLOCK(sc);
}

/*
 * Fail back a request drained from a virtqueue: CAM_NO_HBA when
 * detaching, otherwise CAM_REQUEUE_REQ so CAM retries it after a
 * bus reset.
 */
static void
vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	union ccb *ccb;
	int detach;

	ccb = req->vsr_ccb;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);

	/*
	 * The callout must be drained when detaching since the request is
	 * about to be freed. The VTSCSI_MTX must not be held for this in
	 * case the callout is pending because there is a deadlock potential.
	 * Otherwise, the virtqueue is being drained because of a bus reset
	 * so we only need to attempt to stop the callouts.
	 */
	detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
	if (detach != 0)
		VTSCSI_LOCK_NOTOWNED(sc);
	else
		VTSCSI_LOCK_OWNED(sc);

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
		if (detach != 0)
			callout_drain(&req->vsr_callout);
		else
			callout_stop(&req->vsr_callout);
	}

	if (ccb != NULL) {
		if (detach != 0) {
			VTSCSI_LOCK(sc);
			ccb->ccb_h.status = CAM_NO_HBA;
		} else
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		if (detach != 0)
			VTSCSI_UNLOCK(sc);
	}

	vtscsi_enqueue_request(sc, req);
}

/*
 * Cancel every request still sitting in the given virtqueue.
 */
static void
vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
{
	struct vtscsi_request *req;
	int last;

	last = 0;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);

	while ((req = virtqueue_drain(vq, &last)) != NULL)
		vtscsi_cancel_request(sc, req);

	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}

static void
vtscsi_drain_vqs(struct vtscsi_softc *sc)
{

	if (sc->vtscsi_control_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
	if (sc->vtscsi_request_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_event_vq != NULL)
		vtscsi_drain_event_vq(sc);
}

/*
 * Quiesce the device: mask virtqueue interrupts and stop the device.
 */
static void
vtscsi_stop(struct vtscsi_softc *sc)
{

	vtscsi_disable_vqs_intr(sc);
	virtio_stop(sc->vtscsi_dev);
}

/*
 * Reset the bus: stop the device, complete and drain all outstanding
 * requests, reinitialize the device, and announce the reset to CAM.
 * Called with the softc mutex held; honors the bus-reset-disable knob.
 */
static int
vtscsi_reset_bus(struct vtscsi_softc *sc)
{
	int error;

	VTSCSI_LOCK_OWNED(sc);

	if (vtscsi_bus_reset_disable != 0) {
		device_printf(sc->vtscsi_dev, "bus reset disabled\n");
		return (0);
	}

	sc->vtscsi_flags |= VTSCSI_FLAG_RESET;

	/*
	 * vtscsi_stop() will cause the in-flight requests to be canceled.
	 * Those requests are then completed here so CAM will retry them
	 * after the reset is complete.
	 */
	vtscsi_stop(sc);
	vtscsi_complete_vqs_locked(sc);

	/* Rid the virtqueues of any remaining requests. */
	vtscsi_drain_vqs(sc);

	/*
	 * Any resource shortage that froze the SIMQ cannot persist across
	 * a bus reset so ensure it gets thawed here.
	 */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		xpt_release_simq(sc->vtscsi_sim, 0);

	error = vtscsi_reinit(sc);
	if (error) {
		device_printf(sc->vtscsi_dev,
		    "reinitialization failed, stopping device...\n");
		vtscsi_stop(sc);
	} else
		vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
		    CAM_LUN_WILDCARD);

	sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;

	return (error);
}

/*
 * One-time initialization of a request structure.
 */
static void
vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

#ifdef INVARIANTS
	int req_nsegs, resp_nsegs;

	/* Each header must fit within a single sglist segment. */
	req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
	resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));

	KASSERT(req_nsegs == 1, ("request crossed page boundary"));
	KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
#endif

	req->vsr_softc = sc;
	callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
}

/*
 * Allocate the pool of request structures, sized by the request
 * virtqueue plus a reserve for internal use.
 */
static int
vtscsi_alloc_requests(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;
	int i, nreqs;

	/*
	 * Commands destined for either the request or control queues come
	 * from the same SIM queue. Use the size of the request virtqueue
	 * as it (should) be much more frequently used. Some additional
	 * requests are allocated for internal (TMF) use.
2045 */ 2046 nreqs = virtqueue_size(sc->vtscsi_request_vq); 2047 if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) 2048 nreqs /= VTSCSI_MIN_SEGMENTS; 2049 nreqs += VTSCSI_RESERVED_REQUESTS; 2050 2051 for (i = 0; i < nreqs; i++) { 2052 req = malloc(sizeof(struct vtscsi_request), M_DEVBUF, 2053 M_NOWAIT); 2054 if (req == NULL) 2055 return (ENOMEM); 2056 2057 vtscsi_init_request(sc, req); 2058 2059 sc->vtscsi_nrequests++; 2060 vtscsi_enqueue_request(sc, req); 2061 } 2062 2063 return (0); 2064 } 2065 2066 static void 2067 vtscsi_free_requests(struct vtscsi_softc *sc) 2068 { 2069 struct vtscsi_request *req; 2070 2071 while ((req = vtscsi_dequeue_request(sc)) != NULL) { 2072 KASSERT(callout_active(&req->vsr_callout) == 0, 2073 ("request callout still active")); 2074 2075 sc->vtscsi_nrequests--; 2076 free(req, M_DEVBUF); 2077 } 2078 2079 KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d", 2080 sc->vtscsi_nrequests)); 2081 } 2082 2083 static void 2084 vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req) 2085 { 2086 2087 KASSERT(req->vsr_softc == sc, 2088 ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc)); 2089 2090 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req); 2091 2092 /* A request is available so the SIMQ could be released. */ 2093 if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0) 2094 xpt_release_simq(sc->vtscsi_sim, 1); 2095 2096 req->vsr_ccb = NULL; 2097 req->vsr_complete = NULL; 2098 req->vsr_ptr0 = NULL; 2099 req->vsr_state = VTSCSI_REQ_STATE_FREE; 2100 req->vsr_flags = 0; 2101 2102 bzero(&req->vsr_ureq, sizeof(req->vsr_ureq)); 2103 bzero(&req->vsr_uresp, sizeof(req->vsr_uresp)); 2104 2105 /* 2106 * We insert at the tail of the queue in order to make it 2107 * very unlikely a request will be reused if we race with 2108 * stopping its callout handler. 
2109 */ 2110 TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link); 2111 } 2112 2113 static struct vtscsi_request * 2114 vtscsi_dequeue_request(struct vtscsi_softc *sc) 2115 { 2116 struct vtscsi_request *req; 2117 2118 req = TAILQ_FIRST(&sc->vtscsi_req_free); 2119 if (req != NULL) { 2120 req->vsr_state = VTSCSI_REQ_STATE_INUSE; 2121 TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link); 2122 } else 2123 sc->vtscsi_stats.dequeue_no_requests++; 2124 2125 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req); 2126 2127 return (req); 2128 } 2129 2130 static void 2131 vtscsi_complete_request(struct vtscsi_request *req) 2132 { 2133 2134 if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED) 2135 req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE; 2136 2137 if (req->vsr_complete != NULL) 2138 req->vsr_complete(req->vsr_softc, req); 2139 } 2140 2141 static void 2142 vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq) 2143 { 2144 struct vtscsi_request *req; 2145 2146 VTSCSI_LOCK_OWNED(sc); 2147 2148 while ((req = virtqueue_dequeue(vq, NULL)) != NULL) 2149 vtscsi_complete_request(req); 2150 } 2151 2152 static void 2153 vtscsi_control_vq_intr(void *xsc) 2154 { 2155 struct vtscsi_softc *sc; 2156 struct virtqueue *vq; 2157 2158 sc = xsc; 2159 vq = sc->vtscsi_control_vq; 2160 2161 again: 2162 VTSCSI_LOCK(sc); 2163 2164 vtscsi_complete_vq(sc, sc->vtscsi_control_vq); 2165 2166 if (virtqueue_enable_intr(vq) != 0) { 2167 virtqueue_disable_intr(vq); 2168 VTSCSI_UNLOCK(sc); 2169 goto again; 2170 } 2171 2172 VTSCSI_UNLOCK(sc); 2173 } 2174 2175 static void 2176 vtscsi_event_vq_intr(void *xsc) 2177 { 2178 struct vtscsi_softc *sc; 2179 struct virtqueue *vq; 2180 struct virtio_scsi_event *event; 2181 2182 sc = xsc; 2183 vq = sc->vtscsi_event_vq; 2184 2185 again: 2186 VTSCSI_LOCK(sc); 2187 2188 while ((event = virtqueue_dequeue(vq, NULL)) != NULL) 2189 vtscsi_handle_event(sc, event); 2190 2191 if (virtqueue_enable_intr(vq) != 0) { 2192 virtqueue_disable_intr(vq); 2193 VTSCSI_UNLOCK(sc); 2194 goto 
		    again;
	}

	VTSCSI_UNLOCK(sc);
}

/*
 * Request virtqueue interrupt handler.
 */
static void
vtscsi_request_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;

	sc = xsc;
	vq = sc->vtscsi_request_vq;

again:
	VTSCSI_LOCK(sc);

	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

	VTSCSI_UNLOCK(sc);
}

static void
vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_disable_intr(sc->vtscsi_control_vq);
	virtqueue_disable_intr(sc->vtscsi_event_vq);
	virtqueue_disable_intr(sc->vtscsi_request_vq);
}

static void
vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_enable_intr(sc->vtscsi_control_vq);
	virtqueue_enable_intr(sc->vtscsi_event_vq);
	virtqueue_enable_intr(sc->vtscsi_request_vq);
}

/*
 * Fetch the debug level tunable: the global knob first, then any
 * per-device override.
 */
static void
vtscsi_get_tunables(struct vtscsi_softc *sc)
{
	char tmpstr[64];

	TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);

	snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
	    device_get_unit(sc->vtscsi_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
}

/*
 * Attach the per-device sysctl nodes: debug level and statistics.
 */
static void
vtscsi_add_sysctl(struct vtscsi_softc *sc)
{
	device_t dev;
	struct vtscsi_statistics *stats;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtscsi_dev;
	stats = &sc->vtscsi_stats;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
	    CTLFLAG_RW, &sc->vtscsi_debug, 0,
	    "Debug level");

	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
	    CTLFLAG_RD, &stats->scsi_cmd_timeouts,
	    "SCSI command timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
	    CTLFLAG_RD, &stats->dequeue_no_requests,
	    "No available requests to dequeue");
}

/*
 * printf-style debug helper that prefixes output with the CAM path
 * (or a "noperiph" SIM identifier) and, for SCSI I/O CCBs, the
 * decoded command string and transfer length.
 */
static void
vtscsi_printf_req(struct vtscsi_request *req, const char *func,
    const char *fmt, ...)
{
	struct vtscsi_softc *sc;
	union ccb *ccb;
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (req == NULL)
		return;

	sc = req->vsr_softc;
	ccb = req->vsr_ccb;

	va_start(ap, fmt);
	sbuf_new(&sb, str, sizeof(str), 0);

	if (ccb == NULL) {
		sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
		    cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
		    cam_sim_bus(sc->vtscsi_sim));
	} else {
		xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
		}
	}

	sbuf_vprintf(&sb, fmt, ap);
	va_end(ap);

	sbuf_finish(&sb);
	printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
	    sbuf_data(&sb));
}