1 /*- 2 * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 /* Driver for VirtIO SCSI devices. */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/kernel.h> 35 #include <sys/kthread.h> 36 #include <sys/malloc.h> 37 #include <sys/module.h> 38 #include <sys/sglist.h> 39 #include <sys/sysctl.h> 40 #include <sys/lock.h> 41 #include <sys/mutex.h> 42 #include <sys/callout.h> 43 #include <sys/queue.h> 44 #include <sys/sbuf.h> 45 46 #include <machine/stdarg.h> 47 48 #include <machine/bus.h> 49 #include <machine/resource.h> 50 #include <sys/bus.h> 51 #include <sys/rman.h> 52 53 #include <cam/cam.h> 54 #include <cam/cam_ccb.h> 55 #include <cam/cam_sim.h> 56 #include <cam/cam_periph.h> 57 #include <cam/cam_xpt_sim.h> 58 #include <cam/cam_debug.h> 59 #include <cam/scsi/scsi_all.h> 60 #include <cam/scsi/scsi_message.h> 61 62 #include <dev/virtio/virtio.h> 63 #include <dev/virtio/virtqueue.h> 64 #include <dev/virtio/scsi/virtio_scsi.h> 65 #include <dev/virtio/scsi/virtio_scsivar.h> 66 67 #include "virtio_if.h" 68 69 static int vtscsi_modevent(module_t, int, void *); 70 71 static int vtscsi_probe(device_t); 72 static int vtscsi_attach(device_t); 73 static int vtscsi_detach(device_t); 74 static int vtscsi_suspend(device_t); 75 static int vtscsi_resume(device_t); 76 77 static void vtscsi_negotiate_features(struct vtscsi_softc *); 78 static int vtscsi_maximum_segments(struct vtscsi_softc *, int); 79 static int vtscsi_alloc_virtqueues(struct vtscsi_softc *); 80 static void vtscsi_write_device_config(struct vtscsi_softc *); 81 static int vtscsi_reinit(struct vtscsi_softc *); 82 83 static int vtscsi_alloc_cam(struct vtscsi_softc *); 84 static int vtscsi_register_cam(struct vtscsi_softc *); 85 static void vtscsi_free_cam(struct vtscsi_softc *); 86 static void vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *); 87 static int vtscsi_register_async(struct vtscsi_softc *); 88 static void vtscsi_deregister_async(struct vtscsi_softc *); 89 static void vtscsi_cam_action(struct cam_sim *, union ccb *); 90 static void vtscsi_cam_poll(struct cam_sim *); 91 92 static void vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *, 93 union ccb *); 94 static void vtscsi_cam_get_tran_settings(struct vtscsi_softc *, 95 union ccb *); 96 static void vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *); 97 static void vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *); 98 static void vtscsi_cam_abort(struct vtscsi_softc *, union ccb *); 99 static void vtscsi_cam_path_inquiry(struct vtscsi_softc *, 100 struct cam_sim *, union ccb *); 101 102 static int vtscsi_sg_append_scsi_buf(struct vtscsi_softc *, 103 struct sglist *, struct ccb_scsiio *); 104 static int vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *, 105 struct vtscsi_request *, int *, int *); 106 static int vtscsi_execute_scsi_cmd(struct vtscsi_softc *, 107 struct vtscsi_request *); 108 static int vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *); 109 static void vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *, 110 struct vtscsi_request *); 111 static int vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *, 112 struct vtscsi_request *); 113 static void vtscsi_timedout_scsi_cmd(void *); 114 static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *); 115 static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *, 116 struct ccb_scsiio *, struct virtio_scsi_cmd_resp *); 117 static void vtscsi_complete_scsi_cmd(struct vtscsi_softc *, 118 struct vtscsi_request *); 119 120 static void vtscsi_poll_ctrl_req(struct vtscsi_softc *, 121 struct vtscsi_request *); 122 static int vtscsi_execute_ctrl_req(struct vtscsi_softc *, 123 struct vtscsi_request *, struct sglist *, int, int, int); 124 static void vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c, 125 struct vtscsi_request *); 126 static int vtscsi_execute_abort_task_cmd(struct vtscsi_softc *, 127 struct vtscsi_request *); 128 static int vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *, 129 struct vtscsi_request *); 130 131 static void vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *); 132 static void vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []); 133 static void vtscsi_init_scsi_cmd_req(struct ccb_scsiio *, 134 struct virtio_scsi_cmd_req *); 135 static void vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t, 136 uintptr_t, struct virtio_scsi_ctrl_tmf_req *); 137 138 static void vtscsi_freeze_simq(struct vtscsi_softc *, int); 139 static int vtscsi_thaw_simq(struct vtscsi_softc *, int); 140 141 static void vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t, 142 lun_id_t); 143 static void vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t, 144 lun_id_t); 145 static void vtscsi_execute_rescan_bus(struct vtscsi_softc *); 146 147 static void vtscsi_handle_event(struct vtscsi_softc *, 148 struct virtio_scsi_event *); 149 static int vtscsi_enqueue_event_buf(struct vtscsi_softc *, 150 struct virtio_scsi_event *); 151 static int vtscsi_init_event_vq(struct vtscsi_softc *); 152 static void vtscsi_reinit_event_vq(struct vtscsi_softc *); 153 static void vtscsi_drain_event_vq(struct vtscsi_softc *); 154 155 static void vtscsi_complete_vqs_locked(struct vtscsi_softc *); 156 static void vtscsi_complete_vqs(struct vtscsi_softc *); 157 static void vtscsi_drain_vqs(struct vtscsi_softc *); 158 static void vtscsi_cancel_request(struct vtscsi_softc *, 159 struct vtscsi_request *); 160 static void vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *); 161 static void vtscsi_stop(struct vtscsi_softc *); 162 static int vtscsi_reset_bus(struct vtscsi_softc *); 163 164 static void vtscsi_init_request(struct vtscsi_softc *, 165 struct vtscsi_request *); 166 static int vtscsi_alloc_requests(struct vtscsi_softc *); 167 static void vtscsi_free_requests(struct vtscsi_softc *); 168 static void vtscsi_enqueue_request(struct vtscsi_softc *, 169 struct vtscsi_request *); 170 static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *); 171 172 static void vtscsi_complete_request(struct vtscsi_request *); 173 static void vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *); 174 175 static void vtscsi_control_vq_intr(void *); 176 static void vtscsi_event_vq_intr(void *); 177 static void vtscsi_request_vq_intr(void *); 178 static void vtscsi_disable_vqs_intr(struct vtscsi_softc *); 179 static void vtscsi_enable_vqs_intr(struct vtscsi_softc *); 180 181 static void vtscsi_get_tunables(struct vtscsi_softc *); 182 static void vtscsi_add_sysctl(struct vtscsi_softc *); 183 184 static void vtscsi_printf_req(struct vtscsi_request *, const char *, 185 const char *, ...); 186 187 /* Global tunables. */ 188 /* 189 * The current QEMU VirtIO SCSI implementation does not cancel in-flight 190 * IO during virtio_stop(). So in-flight requests still complete after the 191 * device reset. We would have to wait for all the in-flight IO to complete, 192 * which defeats the typical purpose of a bus reset. We could simulate the 193 * bus reset with either I_T_NEXUS_RESET of all the targets, or with 194 * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the 195 * control virtqueue). But this isn't very useful if things really go off 196 * the rails, so default to disabled for now. 197 */ 198 static int vtscsi_bus_reset_disable = 1; 199 TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable); 200 201 static struct virtio_feature_desc vtscsi_feature_desc[] = { 202 { VIRTIO_SCSI_F_INOUT, "InOut" }, 203 { VIRTIO_SCSI_F_HOTPLUG, "Hotplug" }, 204 205 { 0, NULL } 206 }; 207 208 static device_method_t vtscsi_methods[] = { 209 /* Device methods. */ 210 DEVMETHOD(device_probe, vtscsi_probe), 211 DEVMETHOD(device_attach, vtscsi_attach), 212 DEVMETHOD(device_detach, vtscsi_detach), 213 DEVMETHOD(device_suspend, vtscsi_suspend), 214 DEVMETHOD(device_resume, vtscsi_resume), 215 216 DEVMETHOD_END 217 }; 218 219 static driver_t vtscsi_driver = { 220 "vtscsi", 221 vtscsi_methods, 222 sizeof(struct vtscsi_softc) 223 }; 224 static devclass_t vtscsi_devclass; 225 226 DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass, 227 vtscsi_modevent, 0); 228 MODULE_VERSION(virtio_scsi, 1); 229 MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1); 230 MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1); 231 232 static int 233 vtscsi_modevent(module_t mod, int type, void *unused) 234 { 235 int error; 236 237 switch (type) { 238 case MOD_LOAD: 239 case MOD_QUIESCE: 240 case MOD_UNLOAD: 241 case MOD_SHUTDOWN: 242 error = 0; 243 break; 244 default: 245 error = EOPNOTSUPP; 246 break; 247 } 248 249 return (error); 250 } 251 252 static int 253 vtscsi_probe(device_t dev) 254 { 255 256 if (virtio_get_device_type(dev) != VIRTIO_ID_SCSI) 257 return (ENXIO); 258 259 device_set_desc(dev, "VirtIO SCSI Adapter"); 260 261 return (BUS_PROBE_DEFAULT); 262 } 263 264 static int 265 vtscsi_attach(device_t dev) 266 { 267 struct vtscsi_softc *sc; 268 struct virtio_scsi_config scsicfg; 269 int error; 270 271 sc = device_get_softc(dev); 272 sc->vtscsi_dev = dev; 273 274 VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev)); 275 TAILQ_INIT(&sc->vtscsi_req_free); 276 277 vtscsi_get_tunables(sc); 278 vtscsi_add_sysctl(sc); 279 280 virtio_set_feature_desc(dev, vtscsi_feature_desc); 281 vtscsi_negotiate_features(sc); 282 283 if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) 284 sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT; 285 if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT)) 286 sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL; 287 if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG)) 288 sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG; 289 290 virtio_read_device_config(dev, 0, &scsicfg, 291 sizeof(struct virtio_scsi_config)); 292 293 sc->vtscsi_max_channel = scsicfg.max_channel; 294 sc->vtscsi_max_target = scsicfg.max_target; 295 sc->vtscsi_max_lun = scsicfg.max_lun; 296 sc->vtscsi_event_buf_size = scsicfg.event_info_size; 297 298 vtscsi_write_device_config(sc); 299 300 sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max); 301 sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT); 302 if (sc->vtscsi_sglist == NULL) { 303 error = ENOMEM; 304 device_printf(dev, "cannot allocate sglist\n"); 305 goto fail; 306 } 307 308 error = vtscsi_alloc_virtqueues(sc); 309 if (error) { 310 device_printf(dev, "cannot allocate virtqueues\n"); 311 goto fail; 312 } 313 314 error = vtscsi_init_event_vq(sc); 315 if (error) { 316 device_printf(dev, "cannot populate the eventvq\n"); 317 goto fail; 318 } 319 320 error = vtscsi_alloc_requests(sc); 321 if (error) { 322 device_printf(dev, "cannot allocate requests\n"); 323 goto fail; 324 } 325 326 error = vtscsi_alloc_cam(sc); 327 if (error) { 328 device_printf(dev, "cannot allocate CAM structures\n"); 329 goto fail; 330 } 331 332 error = virtio_setup_intr(dev, INTR_TYPE_CAM); 333 if (error) { 334 device_printf(dev, "cannot setup virtqueue interrupts\n"); 335 goto fail; 336 } 337 338 vtscsi_enable_vqs_intr(sc); 339 340 /* 341 * Register with CAM after interrupts are enabled so we will get 342 * notified of the probe responses. 343 */ 344 error = vtscsi_register_cam(sc); 345 if (error) { 346 device_printf(dev, "cannot register with CAM\n"); 347 goto fail; 348 } 349 350 fail: 351 if (error) 352 vtscsi_detach(dev); 353 354 return (error); 355 } 356 357 static int 358 vtscsi_detach(device_t dev) 359 { 360 struct vtscsi_softc *sc; 361 362 sc = device_get_softc(dev); 363 364 VTSCSI_LOCK(sc); 365 sc->vtscsi_flags |= VTSCSI_FLAG_DETACH; 366 if (device_is_attached(dev)) 367 vtscsi_stop(sc); 368 VTSCSI_UNLOCK(sc); 369 370 vtscsi_complete_vqs(sc); 371 vtscsi_drain_vqs(sc); 372 373 vtscsi_free_cam(sc); 374 vtscsi_free_requests(sc); 375 376 if (sc->vtscsi_sglist != NULL) { 377 sglist_free(sc->vtscsi_sglist); 378 sc->vtscsi_sglist = NULL; 379 } 380 381 VTSCSI_LOCK_DESTROY(sc); 382 383 return (0); 384 } 385 386 static int 387 vtscsi_suspend(device_t dev) 388 { 389 390 return (0); 391 } 392 393 static int 394 vtscsi_resume(device_t dev) 395 { 396 397 return (0); 398 } 399 400 static void 401 vtscsi_negotiate_features(struct vtscsi_softc *sc) 402 { 403 device_t dev; 404 uint64_t features; 405 406 dev = sc->vtscsi_dev; 407 features = virtio_negotiate_features(dev, VTSCSI_FEATURES); 408 sc->vtscsi_features = features; 409 } 410 411 static int 412 vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max) 413 { 414 int nsegs; 415 416 nsegs = VTSCSI_MIN_SEGMENTS; 417 418 if (seg_max > 0) { 419 nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1); 420 if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) 421 nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT); 422 } else 423 nsegs += 1; 424 425 return (nsegs); 426 } 427 428 static int 429 vtscsi_alloc_virtqueues(struct vtscsi_softc *sc) 430 { 431 device_t dev; 432 struct vq_alloc_info vq_info[3]; 433 int nvqs; 434 435 dev = sc->vtscsi_dev; 436 nvqs = 3; 437 438 VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc, 439 &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev)); 440 441 VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc, 442 &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev)); 443 444 VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs, 445 vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq, 446 "%s request", device_get_nameunit(dev)); 447 448 return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info)); 449 } 450 451 static void 452 vtscsi_write_device_config(struct vtscsi_softc *sc) 453 { 454 455 virtio_write_dev_config_4(sc->vtscsi_dev, 456 offsetof(struct virtio_scsi_config, sense_size), 457 VIRTIO_SCSI_SENSE_SIZE); 458 459 /* 460 * This is the size in the virtio_scsi_cmd_req structure. Note 461 * this value (32) is larger than the maximum CAM CDB size (16). 462 */ 463 virtio_write_dev_config_4(sc->vtscsi_dev, 464 offsetof(struct virtio_scsi_config, cdb_size), 465 VIRTIO_SCSI_CDB_SIZE); 466 } 467 468 static int 469 vtscsi_reinit(struct vtscsi_softc *sc) 470 { 471 device_t dev; 472 int error; 473 474 dev = sc->vtscsi_dev; 475 476 error = virtio_reinit(dev, sc->vtscsi_features); 477 if (error == 0) { 478 vtscsi_write_device_config(sc); 479 vtscsi_reinit_event_vq(sc); 480 virtio_reinit_complete(dev); 481 482 vtscsi_enable_vqs_intr(sc); 483 } 484 485 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error); 486 487 return (error); 488 } 489 490 static int 491 vtscsi_alloc_cam(struct vtscsi_softc *sc) 492 { 493 device_t dev; 494 struct cam_devq *devq; 495 int openings; 496 497 dev = sc->vtscsi_dev; 498 openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS; 499 500 devq = cam_simq_alloc(openings); 501 if (devq == NULL) { 502 device_printf(dev, "cannot allocate SIM queue\n"); 503 return (ENOMEM); 504 } 505 506 sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll, 507 "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1, 508 openings, devq); 509 if (sc->vtscsi_sim == NULL) { 510 cam_simq_free(devq); 511 device_printf(dev, "cannot allocate SIM\n"); 512 return (ENOMEM); 513 } 514 515 return (0); 516 } 517 518 static int 519 vtscsi_register_cam(struct vtscsi_softc *sc) 520 { 521 device_t dev; 522 int registered, error; 523 524 dev = sc->vtscsi_dev; 525 registered = 0; 526 527 VTSCSI_LOCK(sc); 528 529 if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) { 530 error = ENOMEM; 531 device_printf(dev, "cannot register XPT bus\n"); 532 goto fail; 533 } 534 535 registered = 1; 536 537 if (xpt_create_path(&sc->vtscsi_path, NULL, 538 cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD, 539 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 540 error = ENOMEM; 541 device_printf(dev, "cannot create bus path\n"); 542 goto fail; 543 } 544 545 if (vtscsi_register_async(sc) != CAM_REQ_CMP) { 546 error = EIO; 547 device_printf(dev, "cannot register async callback\n"); 548 goto fail; 549 } 550 551 VTSCSI_UNLOCK(sc); 552 553 return (0); 554 555 fail: 556 if (sc->vtscsi_path != NULL) { 557 xpt_free_path(sc->vtscsi_path); 558 sc->vtscsi_path = NULL; 559 } 560 561 if (registered != 0) 562 xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim)); 563 564 VTSCSI_UNLOCK(sc); 565 566 return (error); 567 } 568 569 static void 570 vtscsi_free_cam(struct vtscsi_softc *sc) 571 { 572 573 VTSCSI_LOCK(sc); 574 575 if (sc->vtscsi_path != NULL) { 576 vtscsi_deregister_async(sc); 577 578 xpt_free_path(sc->vtscsi_path); 579 sc->vtscsi_path = NULL; 580 581 xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim)); 582 } 583 584 if (sc->vtscsi_sim != NULL) { 585 cam_sim_free(sc->vtscsi_sim, 1); 586 sc->vtscsi_sim = NULL; 587 } 588 589 VTSCSI_UNLOCK(sc); 590 } 591 592 static void 593 vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg) 594 { 595 struct cam_sim *sim; 596 struct vtscsi_softc *sc; 597 598 sim = cb_arg; 599 sc = cam_sim_softc(sim); 600 601 vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code); 602 603 /* 604 * TODO Once QEMU supports event reporting, we should 605 * (un)subscribe to events here. 606 */ 607 switch (code) { 608 case AC_FOUND_DEVICE: 609 break; 610 case AC_LOST_DEVICE: 611 break; 612 } 613 } 614 615 static int 616 vtscsi_register_async(struct vtscsi_softc *sc) 617 { 618 struct ccb_setasync csa; 619 620 xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5); 621 csa.ccb_h.func_code = XPT_SASYNC_CB; 622 csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE; 623 csa.callback = vtscsi_cam_async; 624 csa.callback_arg = sc->vtscsi_sim; 625 626 xpt_action((union ccb *) &csa); 627 628 return (csa.ccb_h.status); 629 } 630 631 static void 632 vtscsi_deregister_async(struct vtscsi_softc *sc) 633 { 634 struct ccb_setasync csa; 635 636 xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5); 637 csa.ccb_h.func_code = XPT_SASYNC_CB; 638 csa.event_enable = 0; 639 csa.callback = vtscsi_cam_async; 640 csa.callback_arg = sc->vtscsi_sim; 641 642 xpt_action((union ccb *) &csa); 643 } 644 645 static void 646 vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb) 647 { 648 struct vtscsi_softc *sc; 649 struct ccb_hdr *ccbh; 650 651 sc = cam_sim_softc(sim); 652 ccbh = &ccb->ccb_h; 653 654 VTSCSI_LOCK_OWNED(sc); 655 656 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) { 657 /* 658 * The VTSCSI_MTX is briefly dropped between setting 659 * VTSCSI_FLAG_DETACH and deregistering with CAM, so 660 * drop any CCBs that come in during that window. 661 */ 662 ccbh->status = CAM_NO_HBA; 663 xpt_done(ccb); 664 return; 665 } 666 667 switch (ccbh->func_code) { 668 case XPT_SCSI_IO: 669 vtscsi_cam_scsi_io(sc, sim, ccb); 670 break; 671 672 case XPT_SET_TRAN_SETTINGS: 673 ccbh->status = CAM_FUNC_NOTAVAIL; 674 xpt_done(ccb); 675 break; 676 677 case XPT_GET_TRAN_SETTINGS: 678 vtscsi_cam_get_tran_settings(sc, ccb); 679 break; 680 681 case XPT_RESET_BUS: 682 vtscsi_cam_reset_bus(sc, ccb); 683 break; 684 685 case XPT_RESET_DEV: 686 vtscsi_cam_reset_dev(sc, ccb); 687 break; 688 689 case XPT_ABORT: 690 vtscsi_cam_abort(sc, ccb); 691 break; 692 693 case XPT_CALC_GEOMETRY: 694 cam_calc_geometry(&ccb->ccg, 1); 695 xpt_done(ccb); 696 break; 697 698 case XPT_PATH_INQ: 699 vtscsi_cam_path_inquiry(sc, sim, ccb); 700 break; 701 702 default: 703 vtscsi_dprintf(sc, VTSCSI_ERROR, 704 "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code); 705 706 ccbh->status = CAM_REQ_INVALID; 707 xpt_done(ccb); 708 break; 709 } 710 } 711 712 static void 713 vtscsi_cam_poll(struct cam_sim *sim) 714 { 715 struct vtscsi_softc *sc; 716 717 sc = cam_sim_softc(sim); 718 719 vtscsi_complete_vqs_locked(sc); 720 } 721 722 static void 723 vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim, 724 union ccb *ccb) 725 { 726 struct ccb_hdr *ccbh; 727 struct ccb_scsiio *csio; 728 int error; 729 730 ccbh = &ccb->ccb_h; 731 csio = &ccb->csio; 732 733 if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) { 734 error = EINVAL; 735 ccbh->status = CAM_REQ_INVALID; 736 goto done; 737 } 738 739 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH && 740 (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) { 741 error = EINVAL; 742 ccbh->status = CAM_REQ_INVALID; 743 goto done; 744 } 745 746 error = vtscsi_start_scsi_cmd(sc, ccb); 747 748 done: 749 if (error) { 750 vtscsi_dprintf(sc, VTSCSI_ERROR, 751 "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status); 752 xpt_done(ccb); 753 } 754 } 755 756 static void 757 vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb) 758 { 759 struct ccb_trans_settings *cts; 760 struct ccb_trans_settings_scsi *scsi; 761 762 cts = &ccb->cts; 763 scsi = &cts->proto_specific.scsi; 764 765 cts->protocol = PROTO_SCSI; 766 cts->protocol_version = SCSI_REV_SPC3; 767 cts->transport = XPORT_SAS; 768 cts->transport_version = 0; 769 770 scsi->valid = CTS_SCSI_VALID_TQ; 771 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 772 773 ccb->ccb_h.status = CAM_REQ_CMP; 774 xpt_done(ccb); 775 } 776 777 static void 778 vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb) 779 { 780 int error; 781 782 error = vtscsi_reset_bus(sc); 783 if (error == 0) 784 ccb->ccb_h.status = CAM_REQ_CMP; 785 else 786 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 787 788 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n", 789 error, ccb, ccb->ccb_h.status); 790 791 xpt_done(ccb); 792 } 793 794 static void 795 vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb) 796 { 797 struct ccb_hdr *ccbh; 798 struct vtscsi_request *req; 799 int error; 800 801 ccbh = &ccb->ccb_h; 802 803 req = vtscsi_dequeue_request(sc); 804 if (req == NULL) { 805 error = EAGAIN; 806 vtscsi_freeze_simq(sc, VTSCSI_REQUEST); 807 goto fail; 808 } 809 810 req->vsr_ccb = ccb; 811 812 error = vtscsi_execute_reset_dev_cmd(sc, req); 813 if (error == 0) 814 return; 815 816 vtscsi_enqueue_request(sc, req); 817 818 fail: 819 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n", 820 error, req, ccb); 821 822 if (error == EAGAIN) 823 ccbh->status = CAM_RESRC_UNAVAIL; 824 else 825 ccbh->status = CAM_REQ_CMP_ERR; 826 827 xpt_done(ccb); 828 } 829 830 static void 831 vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb) 832 { 833 struct vtscsi_request *req; 834 struct ccb_hdr *ccbh; 835 int error; 836 837 ccbh = &ccb->ccb_h; 838 839 req = vtscsi_dequeue_request(sc); 840 if (req == NULL) { 841 error = EAGAIN; 842 vtscsi_freeze_simq(sc, VTSCSI_REQUEST); 843 goto fail; 844 } 845 846 req->vsr_ccb = ccb; 847 848 error = vtscsi_execute_abort_task_cmd(sc, req); 849 if (error == 0) 850 return; 851 852 vtscsi_enqueue_request(sc, req); 853 854 fail: 855 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n", 856 error, req, ccb); 857 858 if (error == EAGAIN) 859 ccbh->status = CAM_RESRC_UNAVAIL; 860 else 861 ccbh->status = CAM_REQ_CMP_ERR; 862 863 xpt_done(ccb); 864 } 865 866 static void 867 vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim, 868 union ccb *ccb) 869 { 870 device_t dev; 871 struct ccb_pathinq *cpi; 872 873 dev = sc->vtscsi_dev; 874 cpi = &ccb->cpi; 875 876 vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb); 877 878 cpi->version_num = 1; 879 cpi->hba_inquiry = PI_TAG_ABLE; 880 cpi->target_sprt = 0; 881 cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED; 882 if (vtscsi_bus_reset_disable != 0) 883 cpi->hba_misc |= PIM_NOBUSRESET; 884 cpi->hba_eng_cnt = 0; 885 886 cpi->max_target = sc->vtscsi_max_target; 887 cpi->max_lun = sc->vtscsi_max_lun; 888 cpi->initiator_id = VTSCSI_INITIATOR_ID; 889 890 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 891 strncpy(cpi->hba_vid, "VirtIO", HBA_IDLEN); 892 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 893 894 cpi->unit_number = cam_sim_unit(sim); 895 cpi->bus_id = cam_sim_bus(sim); 896 897 cpi->base_transfer_speed = 300000; 898 899 cpi->protocol = PROTO_SCSI; 900 cpi->protocol_version = SCSI_REV_SPC3; 901 cpi->transport = XPORT_SAS; 902 cpi->transport_version = 0; 903 904 cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) * 905 PAGE_SIZE; 906 907 cpi->hba_vendor = virtio_get_vendor(dev); 908 cpi->hba_device = virtio_get_device(dev); 909 cpi->hba_subvendor = virtio_get_subvendor(dev); 910 cpi->hba_subdevice = virtio_get_subdevice(dev); 911 912 ccb->ccb_h.status = CAM_REQ_CMP; 913 xpt_done(ccb); 914 } 915 916 static int 917 vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg, 918 struct ccb_scsiio *csio) 919 { 920 struct ccb_hdr *ccbh; 921 struct bus_dma_segment *dseg; 922 int i, error; 923 924 ccbh = &csio->ccb_h; 925 error = 0; 926 927 switch ((ccbh->flags & CAM_DATA_MASK)) { 928 case CAM_DATA_VADDR: 929 error = sglist_append(sg, csio->data_ptr, csio->dxfer_len); 930 break; 931 case CAM_DATA_PADDR: 932 error = sglist_append_phys(sg, 933 (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len); 934 break; 935 case CAM_DATA_SG: 936 for (i = 0; i < csio->sglist_cnt && error == 0; i++) { 937 dseg = &((struct bus_dma_segment *)csio->data_ptr)[i]; 938 error = sglist_append(sg, 939 (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len); 940 } 941 break; 942 case CAM_DATA_SG_PADDR: 943 for (i = 0; i < csio->sglist_cnt && error == 0; i++) { 944 dseg = &((struct bus_dma_segment *)csio->data_ptr)[i]; 945 error = sglist_append_phys(sg, 946 (vm_paddr_t) dseg->ds_addr, dseg->ds_len); 947 } 948 break; 949 case CAM_DATA_BIO: 950 error = sglist_append_bio(sg, (struct bio *) csio->data_ptr); 951 break; 952 default: 953 error = EINVAL; 954 break; 955 } 956 957 return (error); 958 } 959 960 static int 961 vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req, 962 int *readable, int *writable) 963 { 964 struct sglist *sg; 965 struct ccb_hdr *ccbh; 966 struct ccb_scsiio *csio; 967 struct virtio_scsi_cmd_req *cmd_req; 968 struct virtio_scsi_cmd_resp *cmd_resp; 969 int error; 970 971 sg = sc->vtscsi_sglist; 972 csio = &req->vsr_ccb->csio; 973 ccbh = &csio->ccb_h; 974 cmd_req = &req->vsr_cmd_req; 975 cmd_resp = &req->vsr_cmd_resp; 976 977 sglist_reset(sg); 978 979 sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req)); 980 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 981 error = vtscsi_sg_append_scsi_buf(sc, sg, csio); 982 /* At least one segment must be left for the response. */ 983 if (error || sg->sg_nseg == sg->sg_maxseg) 984 goto fail; 985 } 986 987 *readable = sg->sg_nseg; 988 989 sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp)); 990 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) { 991 error = vtscsi_sg_append_scsi_buf(sc, sg, csio); 992 if (error) 993 goto fail; 994 } 995 996 *writable = sg->sg_nseg - *readable; 997 998 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d " 999 "writable=%d\n", req, ccbh, *readable, *writable); 1000 1001 return (0); 1002 1003 fail: 1004 /* 1005 * This should never happen unless maxio was incorrectly set. 1006 */ 1007 vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0); 1008 1009 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p " 1010 "nseg=%d maxseg=%d\n", 1011 error, req, ccbh, sg->sg_nseg, sg->sg_maxseg); 1012 1013 return (EFBIG); 1014 } 1015 1016 static int 1017 vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) 1018 { 1019 struct sglist *sg; 1020 struct virtqueue *vq; 1021 struct ccb_scsiio *csio; 1022 struct ccb_hdr *ccbh; 1023 struct virtio_scsi_cmd_req *cmd_req; 1024 struct virtio_scsi_cmd_resp *cmd_resp; 1025 int readable, writable, error; 1026 1027 sg = sc->vtscsi_sglist; 1028 vq = sc->vtscsi_request_vq; 1029 csio = &req->vsr_ccb->csio; 1030 ccbh = &csio->ccb_h; 1031 cmd_req = &req->vsr_cmd_req; 1032 cmd_resp = &req->vsr_cmd_resp; 1033 1034 vtscsi_init_scsi_cmd_req(csio, cmd_req); 1035 1036 error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable); 1037 if (error) 1038 return (error); 1039 1040 req->vsr_complete = vtscsi_complete_scsi_cmd; 1041 cmd_resp->response = -1; 1042 1043 error = virtqueue_enqueue(vq, req, sg, readable, writable); 1044 if (error) { 1045 vtscsi_dprintf(sc, VTSCSI_ERROR, 1046 "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh); 1047 1048 ccbh->status = CAM_REQUEUE_REQ; 1049 vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ); 1050 return (error); 1051 } 1052 1053 ccbh->status |= CAM_SIM_QUEUED; 1054 ccbh->ccbh_vtscsi_req = req; 1055 1056 virtqueue_notify(vq); 1057 1058 if (ccbh->timeout != CAM_TIME_INFINITY) { 1059 req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET; 1060 callout_reset(&req->vsr_callout, ccbh->timeout * hz / 1000, 1061 vtscsi_timedout_scsi_cmd, req); 1062 } 1063 1064 vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n", 1065 req, ccbh); 1066 1067 return (0); 1068 } 1069 1070 static int 1071 vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb) 1072 { 1073 struct vtscsi_request *req; 1074 int error; 1075 1076 req = vtscsi_dequeue_request(sc); 1077 if (req == NULL) { 1078 ccb->ccb_h.status = CAM_REQUEUE_REQ; 1079 vtscsi_freeze_simq(sc, VTSCSI_REQUEST); 1080 return (ENOBUFS); 1081 } 1082 1083 req->vsr_ccb = ccb; 1084 1085 error = vtscsi_execute_scsi_cmd(sc, req); 1086 if (error) 1087 vtscsi_enqueue_request(sc, req); 1088 1089 return (error); 1090 } 1091 1092 static void 1093 vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc, 1094 struct vtscsi_request *req) 1095 { 1096 struct virtio_scsi_ctrl_tmf_resp *tmf_resp; 1097 struct vtscsi_request *to_req; 1098 uint8_t response; 1099 1100 tmf_resp = &req->vsr_tmf_resp; 1101 response = tmf_resp->response; 1102 to_req = req->vsr_timedout_req; 1103 1104 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n", 1105 req, to_req, response); 1106 1107 vtscsi_enqueue_request(sc, req); 1108 1109 /* 1110 * The timedout request could have completed between when the 1111 * abort task was sent and when the host processed it. 1112 */ 1113 if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT) 1114 return; 1115 1116 /* The timedout request was successfully aborted. */ 1117 if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) 1118 return; 1119 1120 /* Don't bother if the device is going away. */ 1121 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) 1122 return; 1123 1124 /* The timedout request will be aborted by the reset. */ 1125 if (sc->vtscsi_flags & VTSCSI_FLAG_RESET) 1126 return; 1127 1128 vtscsi_reset_bus(sc); 1129 } 1130 1131 static int 1132 vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc, 1133 struct vtscsi_request *to_req) 1134 { 1135 struct sglist *sg; 1136 struct ccb_hdr *to_ccbh; 1137 struct vtscsi_request *req; 1138 struct virtio_scsi_ctrl_tmf_req *tmf_req; 1139 struct virtio_scsi_ctrl_tmf_resp *tmf_resp; 1140 int error; 1141 1142 sg = sc->vtscsi_sglist; 1143 to_ccbh = &to_req->vsr_ccb->ccb_h; 1144 1145 req = vtscsi_dequeue_request(sc); 1146 if (req == NULL) { 1147 error = ENOBUFS; 1148 goto fail; 1149 } 1150 1151 tmf_req = &req->vsr_tmf_req; 1152 tmf_resp = &req->vsr_tmf_resp; 1153 1154 vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK, 1155 (uintptr_t) to_ccbh, tmf_req); 1156 1157 sglist_reset(sg); 1158 sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req)); 1159 sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); 1160 1161 req->vsr_timedout_req = to_req; 1162 req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd; 1163 tmf_resp->response = -1; 1164 1165 error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1, 1166 VTSCSI_EXECUTE_ASYNC); 1167 if (error == 0) 1168 return (0); 1169 1170 vtscsi_enqueue_request(sc, req); 1171 1172 fail: 1173 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p " 1174 "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh); 1175 1176 return (error); 1177 } 1178 1179 static void 1180 vtscsi_timedout_scsi_cmd(void *xreq) 1181 { 1182 struct vtscsi_softc *sc; 1183 struct vtscsi_request *to_req; 1184 1185 to_req = xreq; 1186 sc = to_req->vsr_softc; 1187 1188 vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n", 1189 to_req, to_req->vsr_ccb, to_req->vsr_state); 1190 1191 /* Don't bother if the device is going away. */ 1192 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) 1193 return; 1194 1195 /* 1196 * Bail if the request is not in use. We likely raced when 1197 * stopping the callout handler or it has already been aborted. 1198 */ 1199 if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE || 1200 (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0) 1201 return; 1202 1203 /* 1204 * Complete the request queue in case the timedout request is 1205 * actually just pending. 1206 */ 1207 vtscsi_complete_vq(sc, sc->vtscsi_request_vq); 1208 if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE) 1209 return; 1210 1211 sc->vtscsi_stats.scsi_cmd_timeouts++; 1212 to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT; 1213 1214 if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0) 1215 return; 1216 1217 vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n"); 1218 vtscsi_reset_bus(sc); 1219 } 1220 1221 static cam_status 1222 vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp) 1223 { 1224 cam_status status; 1225 1226 switch (cmd_resp->response) { 1227 case VIRTIO_SCSI_S_OK: 1228 status = CAM_REQ_CMP; 1229 break; 1230 case VIRTIO_SCSI_S_OVERRUN: 1231 status = CAM_DATA_RUN_ERR; 1232 break; 1233 case VIRTIO_SCSI_S_ABORTED: 1234 status = CAM_REQ_ABORTED; 1235 break; 1236 case VIRTIO_SCSI_S_BAD_TARGET: 1237 status = CAM_SEL_TIMEOUT; 1238 break; 1239 case VIRTIO_SCSI_S_RESET: 1240 status = CAM_SCSI_BUS_RESET; 1241 break; 1242 case VIRTIO_SCSI_S_BUSY: 1243 status = CAM_SCSI_BUSY; 1244 break; 1245 case VIRTIO_SCSI_S_TRANSPORT_FAILURE: 1246 case VIRTIO_SCSI_S_TARGET_FAILURE: 1247 case VIRTIO_SCSI_S_NEXUS_FAILURE: 1248 status = CAM_SCSI_IT_NEXUS_LOST; 1249 break; 1250 default: /* VIRTIO_SCSI_S_FAILURE */ 1251 status = CAM_REQ_CMP_ERR; 1252 break; 1253 } 1254 1255 return (status); 1256 } 1257 1258 static cam_status 1259 vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc, 1260 struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp) 1261 { 1262 cam_status status; 1263 1264 csio->scsi_status = cmd_resp->status; 1265 csio->resid = cmd_resp->resid; 1266 1267 if (csio->scsi_status == SCSI_STATUS_OK) 1268 status = CAM_REQ_CMP; 1269 else 1270 status = CAM_SCSI_STATUS_ERROR; 1271 1272 if (cmd_resp->sense_len > 0) { 1273 status |= CAM_AUTOSNS_VALID; 1274 1275 if (cmd_resp->sense_len < csio->sense_len) 1276 csio->sense_resid = csio->sense_len - 1277 cmd_resp->sense_len; 1278 else 1279 csio->sense_resid = 0; 1280 1281 bzero(&csio->sense_data, sizeof(csio->sense_data)); 1282 memcpy(cmd_resp->sense, &csio->sense_data, 1283 csio->sense_len - csio->sense_resid); 1284 } 1285 1286 vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR, 1287 "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n", 1288 csio, csio->scsi_status, csio->resid, csio->sense_resid); 1289 1290 return (status); 1291 } 1292 1293 static void 1294 vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req) 1295 { 1296 struct ccb_hdr *ccbh; 1297 struct ccb_scsiio *csio; 1298 struct virtio_scsi_cmd_resp *cmd_resp; 1299 cam_status status; 1300 1301 csio = &req->vsr_ccb->csio; 1302 ccbh = &csio->ccb_h; 1303 cmd_resp = &req->vsr_cmd_resp; 1304 1305 KASSERT(ccbh->ccbh_vtscsi_req == req, 1306 ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req)); 1307 1308 if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) 1309 callout_stop(&req->vsr_callout); 1310 1311 status = vtscsi_scsi_cmd_cam_status(cmd_resp); 1312 if (status == CAM_REQ_ABORTED) { 1313 if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT) 1314 status = CAM_CMD_TIMEOUT; 1315 } else if (status == CAM_REQ_CMP) 1316 status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp); 1317 1318 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1319 status |= CAM_DEV_QFRZN; 1320 xpt_freeze_devq(ccbh->path, 1); 1321 } 1322 1323 if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0) 1324 status |= CAM_RELEASE_SIMQ; 1325 1326 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n", 1327 req, ccbh, status); 1328 1329 ccbh->status = status; 1330 xpt_done(req->vsr_ccb); 1331 vtscsi_enqueue_request(sc, req); 1332 } 1333 1334 static void 1335 vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req) 1336 { 1337 1338 /* XXX We probably shouldn't poll forever. */ 1339 req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED; 1340 do 1341 vtscsi_complete_vq(sc, sc->vtscsi_control_vq); 1342 while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0); 1343 1344 req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED; 1345 } 1346 1347 static int 1348 vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req, 1349 struct sglist *sg, int readable, int writable, int flag) 1350 { 1351 struct virtqueue *vq; 1352 int error; 1353 1354 vq = sc->vtscsi_control_vq; 1355 1356 MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL); 1357 1358 error = virtqueue_enqueue(vq, req, sg, readable, writable); 1359 if (error) { 1360 /* 1361 * Return EAGAIN when the virtqueue does not have enough 1362 * descriptors available. 1363 */ 1364 if (error == ENOSPC || error == EMSGSIZE) 1365 error = EAGAIN; 1366 1367 return (error); 1368 } 1369 1370 virtqueue_notify(vq); 1371 if (flag == VTSCSI_EXECUTE_POLL) 1372 vtscsi_poll_ctrl_req(sc, req); 1373 1374 return (0); 1375 } 1376 1377 static void 1378 vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc, 1379 struct vtscsi_request *req) 1380 { 1381 union ccb *ccb; 1382 struct ccb_hdr *ccbh; 1383 struct virtio_scsi_ctrl_tmf_resp *tmf_resp; 1384 1385 ccb = req->vsr_ccb; 1386 ccbh = &ccb->ccb_h; 1387 tmf_resp = &req->vsr_tmf_resp; 1388 1389 switch (tmf_resp->response) { 1390 case VIRTIO_SCSI_S_FUNCTION_COMPLETE: 1391 ccbh->status = CAM_REQ_CMP; 1392 break; 1393 case VIRTIO_SCSI_S_FUNCTION_REJECTED: 1394 ccbh->status = CAM_UA_ABORT; 1395 break; 1396 default: 1397 ccbh->status = CAM_REQ_CMP_ERR; 1398 break; 1399 } 1400 1401 xpt_done(ccb); 1402 vtscsi_enqueue_request(sc, req); 1403 } 1404 1405 static int 1406 vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc, 1407 struct vtscsi_request *req) 1408 { 1409 struct sglist *sg; 1410 struct ccb_abort *cab; 1411 struct ccb_hdr *ccbh; 1412 struct ccb_hdr *abort_ccbh; 1413 struct vtscsi_request *abort_req; 1414 struct virtio_scsi_ctrl_tmf_req *tmf_req; 1415 struct virtio_scsi_ctrl_tmf_resp *tmf_resp; 1416 int error; 1417 1418 sg = sc->vtscsi_sglist; 1419 cab = &req->vsr_ccb->cab; 1420 ccbh = &cab->ccb_h; 1421 tmf_req = &req->vsr_tmf_req; 1422 tmf_resp = &req->vsr_tmf_resp; 1423 1424 /* CCB header and request that's to be aborted. */ 1425 abort_ccbh = &cab->abort_ccb->ccb_h; 1426 abort_req = abort_ccbh->ccbh_vtscsi_req; 1427 1428 if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) { 1429 error = EINVAL; 1430 goto fail; 1431 } 1432 1433 /* Only attempt to abort requests that could be in-flight. */ 1434 if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) { 1435 error = EALREADY; 1436 goto fail; 1437 } 1438 1439 abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED; 1440 if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) 1441 callout_stop(&abort_req->vsr_callout); 1442 1443 vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK, 1444 (uintptr_t) abort_ccbh, tmf_req); 1445 1446 sglist_reset(sg); 1447 sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req)); 1448 sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); 1449 1450 req->vsr_complete = vtscsi_complete_abort_task_cmd; 1451 tmf_resp->response = -1; 1452 1453 error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1, 1454 VTSCSI_EXECUTE_ASYNC); 1455 1456 fail: 1457 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p " 1458 "abort_req=%p\n", error, req, abort_ccbh, abort_req); 1459 1460 return (error); 1461 } 1462 1463 static void 1464 vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc, 1465 struct vtscsi_request *req) 1466 { 1467 union ccb *ccb; 1468 struct ccb_hdr *ccbh; 1469 struct virtio_scsi_ctrl_tmf_resp *tmf_resp; 1470 1471 ccb = req->vsr_ccb; 1472 ccbh = &ccb->ccb_h; 1473 tmf_resp = &req->vsr_tmf_resp; 1474 1475 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n", 1476 req, ccb, tmf_resp->response); 1477 1478 if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) { 1479 ccbh->status = CAM_REQ_CMP; 1480 vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id, 1481 ccbh->target_lun); 1482 } else 1483 ccbh->status = CAM_REQ_CMP_ERR; 1484 1485 xpt_done(ccb); 1486 vtscsi_enqueue_request(sc, req); 1487 } 1488 1489 static int 1490 vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc, 1491 struct vtscsi_request *req) 1492 { 1493 struct sglist *sg; 1494 struct ccb_resetdev *crd; 1495 struct ccb_hdr *ccbh; 1496 struct virtio_scsi_ctrl_tmf_req *tmf_req; 1497 struct virtio_scsi_ctrl_tmf_resp *tmf_resp; 1498 uint32_t subtype; 1499 int error; 1500 1501 sg = sc->vtscsi_sglist; 1502 crd = &req->vsr_ccb->crd; 1503 ccbh = &crd->ccb_h; 1504 tmf_req = &req->vsr_tmf_req; 1505 tmf_resp = &req->vsr_tmf_resp; 1506 1507 if (ccbh->target_lun == CAM_LUN_WILDCARD) 1508 subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET; 1509 else 1510 subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET; 1511 1512 vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req); 1513 1514 sglist_reset(sg); 1515 sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req)); 1516 sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); 1517 1518 req->vsr_complete = vtscsi_complete_reset_dev_cmd; 1519 tmf_resp->response = -1; 1520 1521 error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1, 1522 VTSCSI_EXECUTE_ASYNC); 1523 1524 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n", 1525 error, req, ccbh); 1526 1527 return (error); 1528 } 1529 1530 static void 1531 vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id) 1532 { 1533 1534 *target_id = lun[1]; 1535 *lun_id = (lun[2] << 8) | lun[3]; 1536 } 1537 1538 static void 1539 vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[]) 1540 { 1541 1542 lun[0] = 1; 1543 lun[1] = ccbh->target_id; 1544 lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F); 1545 lun[3] = ccbh->target_lun & 0xFF; 1546 } 1547 1548 static void 1549 vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio, 1550 struct virtio_scsi_cmd_req *cmd_req) 1551 { 1552 uint8_t attr; 1553 1554 switch (csio->tag_action) { 1555 case MSG_HEAD_OF_Q_TAG: 1556 attr = VIRTIO_SCSI_S_HEAD; 1557 break; 1558 case MSG_ORDERED_Q_TAG: 1559 attr = VIRTIO_SCSI_S_ORDERED; 1560 break; 1561 case MSG_ACA_TASK: 1562 attr = VIRTIO_SCSI_S_ACA; 1563 break; 1564 default: /* MSG_SIMPLE_Q_TAG */ 1565 attr = VIRTIO_SCSI_S_SIMPLE; 1566 break; 1567 } 1568 1569 vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun); 1570 cmd_req->tag = (uintptr_t) csio; 1571 cmd_req->task_attr = attr; 1572 1573 memcpy(cmd_req->cdb, 1574 csio->ccb_h.flags & CAM_CDB_POINTER ? 1575 csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes, 1576 csio->cdb_len); 1577 } 1578 1579 static void 1580 vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype, 1581 uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req) 1582 { 1583 1584 vtscsi_set_request_lun(ccbh, tmf_req->lun); 1585 1586 tmf_req->type = VIRTIO_SCSI_T_TMF; 1587 tmf_req->subtype = subtype; 1588 tmf_req->tag = tag; 1589 } 1590 1591 static void 1592 vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason) 1593 { 1594 int frozen; 1595 1596 frozen = sc->vtscsi_frozen; 1597 1598 if (reason & VTSCSI_REQUEST && 1599 (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0) 1600 sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS; 1601 1602 if (reason & VTSCSI_REQUEST_VQ && 1603 (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0) 1604 sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL; 1605 1606 /* Freeze the SIMQ if transitioned to frozen. */ 1607 if (frozen == 0 && sc->vtscsi_frozen != 0) { 1608 vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n"); 1609 xpt_freeze_simq(sc->vtscsi_sim, 1); 1610 } 1611 } 1612 1613 static int 1614 vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason) 1615 { 1616 int thawed; 1617 1618 if (sc->vtscsi_frozen == 0 || reason == 0) 1619 return (0); 1620 1621 if (reason & VTSCSI_REQUEST && 1622 sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) 1623 sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS; 1624 1625 if (reason & VTSCSI_REQUEST_VQ && 1626 sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) 1627 sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL; 1628 1629 thawed = sc->vtscsi_frozen == 0; 1630 if (thawed != 0) 1631 vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n"); 1632 1633 return (thawed); 1634 } 1635 1636 static void 1637 vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code, 1638 target_id_t target_id, lun_id_t lun_id) 1639 { 1640 struct cam_path *path; 1641 1642 /* Use the wildcard path from our softc for bus announcements. */ 1643 if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) { 1644 xpt_async(ac_code, sc->vtscsi_path, NULL); 1645 return; 1646 } 1647 1648 if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim), 1649 target_id, lun_id) != CAM_REQ_CMP) { 1650 vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n"); 1651 return; 1652 } 1653 1654 xpt_async(ac_code, path, NULL); 1655 xpt_free_path(path); 1656 } 1657 1658 static void 1659 vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id, 1660 lun_id_t lun_id) 1661 { 1662 union ccb *ccb; 1663 cam_status status; 1664 1665 ccb = xpt_alloc_ccb_nowait(); 1666 if (ccb == NULL) { 1667 vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n"); 1668 return; 1669 } 1670 1671 status = xpt_create_path(&ccb->ccb_h.path, NULL, 1672 cam_sim_path(sc->vtscsi_sim), target_id, lun_id); 1673 if (status != CAM_REQ_CMP) { 1674 xpt_free_ccb(ccb); 1675 return; 1676 } 1677 1678 xpt_rescan(ccb); 1679 } 1680 1681 static void 1682 vtscsi_execute_rescan_bus(struct vtscsi_softc *sc) 1683 { 1684 1685 vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 1686 } 1687 1688 static void 1689 vtscsi_transport_reset_event(struct vtscsi_softc *sc, 1690 struct virtio_scsi_event *event) 1691 { 1692 target_id_t target_id; 1693 lun_id_t lun_id; 1694 1695 vtscsi_get_request_lun(event->lun, &target_id, &lun_id); 1696 1697 switch (event->reason) { 1698 case VIRTIO_SCSI_EVT_RESET_RESCAN: 1699 case VIRTIO_SCSI_EVT_RESET_REMOVED: 1700 vtscsi_execute_rescan(sc, target_id, lun_id); 1701 break; 1702 default: 1703 device_printf(sc->vtscsi_dev, 1704 "unhandled transport event reason: %d\n", event->reason); 1705 break; 1706 } 1707 } 1708 1709 static void 1710 vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event) 1711 { 1712 int error; 1713 1714 if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) { 1715 switch (event->event) { 1716 case VIRTIO_SCSI_T_TRANSPORT_RESET: 1717 vtscsi_transport_reset_event(sc, event); 1718 break; 1719 default: 1720 device_printf(sc->vtscsi_dev, 1721 "unhandled event: %d\n", event->event); 1722 break; 1723 } 1724 } else 1725 vtscsi_execute_rescan_bus(sc); 1726 1727 /* 1728 * This should always be successful since the buffer 1729 * was just dequeued. 1730 */ 1731 error = vtscsi_enqueue_event_buf(sc, event); 1732 KASSERT(error == 0, 1733 ("cannot requeue event buffer: %d", error)); 1734 } 1735 1736 static int 1737 vtscsi_enqueue_event_buf(struct vtscsi_softc *sc, 1738 struct virtio_scsi_event *event) 1739 { 1740 struct sglist *sg; 1741 struct virtqueue *vq; 1742 int size, error; 1743 1744 sg = sc->vtscsi_sglist; 1745 vq = sc->vtscsi_event_vq; 1746 size = sc->vtscsi_event_buf_size; 1747 1748 bzero(event, size); 1749 1750 sglist_reset(sg); 1751 error = sglist_append(sg, event, size); 1752 if (error) 1753 return (error); 1754 1755 error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg); 1756 if (error) 1757 return (error); 1758 1759 virtqueue_notify(vq); 1760 1761 return (0); 1762 } 1763 1764 static int 1765 vtscsi_init_event_vq(struct vtscsi_softc *sc) 1766 { 1767 struct virtio_scsi_event *event; 1768 int i, size, error; 1769 1770 /* 1771 * The first release of QEMU with VirtIO SCSI support would crash 1772 * when attempting to notify the event virtqueue. This was fixed 1773 * when hotplug support was added. 1774 */ 1775 if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) 1776 size = sc->vtscsi_event_buf_size; 1777 else 1778 size = 0; 1779 1780 if (size < sizeof(struct virtio_scsi_event)) 1781 return (0); 1782 1783 for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) { 1784 event = &sc->vtscsi_event_bufs[i]; 1785 1786 error = vtscsi_enqueue_event_buf(sc, event); 1787 if (error) 1788 break; 1789 } 1790 1791 /* 1792 * Even just one buffer is enough. Missed events are 1793 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag. 1794 */ 1795 if (i > 0) 1796 error = 0; 1797 1798 return (error); 1799 } 1800 1801 static void 1802 vtscsi_reinit_event_vq(struct vtscsi_softc *sc) 1803 { 1804 struct virtio_scsi_event *event; 1805 int i, error; 1806 1807 if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 || 1808 sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event)) 1809 return; 1810 1811 for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) { 1812 event = &sc->vtscsi_event_bufs[i]; 1813 1814 error = vtscsi_enqueue_event_buf(sc, event); 1815 if (error) 1816 break; 1817 } 1818 1819 KASSERT(i > 0, ("cannot reinit event vq: %d", error)); 1820 } 1821 1822 static void 1823 vtscsi_drain_event_vq(struct vtscsi_softc *sc) 1824 { 1825 struct virtqueue *vq; 1826 int last; 1827 1828 vq = sc->vtscsi_event_vq; 1829 last = 0; 1830 1831 while (virtqueue_drain(vq, &last) != NULL) 1832 ; 1833 1834 KASSERT(virtqueue_empty(vq), ("eventvq not empty")); 1835 } 1836 1837 static void 1838 vtscsi_complete_vqs_locked(struct vtscsi_softc *sc) 1839 { 1840 1841 VTSCSI_LOCK_OWNED(sc); 1842 1843 if (sc->vtscsi_request_vq != NULL) 1844 vtscsi_complete_vq(sc, sc->vtscsi_request_vq); 1845 if (sc->vtscsi_control_vq != NULL) 1846 vtscsi_complete_vq(sc, sc->vtscsi_control_vq); 1847 } 1848 1849 static void 1850 vtscsi_complete_vqs(struct vtscsi_softc *sc) 1851 { 1852 1853 VTSCSI_LOCK(sc); 1854 vtscsi_complete_vqs_locked(sc); 1855 VTSCSI_UNLOCK(sc); 1856 } 1857 1858 static void 1859 vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req) 1860 { 1861 union ccb *ccb; 1862 int detach; 1863 1864 ccb = req->vsr_ccb; 1865 1866 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb); 1867 1868 /* 1869 * The callout must be drained when detaching since the request is 1870 * about to be freed. The VTSCSI_MTX must not be held for this in 1871 * case the callout is pending because there is a deadlock potential. 1872 * Otherwise, the virtqueue is being drained because of a bus reset 1873 * so we only need to attempt to stop the callouts. 1874 */ 1875 detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0; 1876 if (detach != 0) 1877 VTSCSI_LOCK_NOTOWNED(sc); 1878 else 1879 VTSCSI_LOCK_OWNED(sc); 1880 1881 if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) { 1882 if (detach != 0) 1883 callout_drain(&req->vsr_callout); 1884 else 1885 callout_stop(&req->vsr_callout); 1886 } 1887 1888 if (ccb != NULL) { 1889 if (detach != 0) { 1890 VTSCSI_LOCK(sc); 1891 ccb->ccb_h.status = CAM_NO_HBA; 1892 } else 1893 ccb->ccb_h.status = CAM_REQUEUE_REQ; 1894 xpt_done(ccb); 1895 if (detach != 0) 1896 VTSCSI_UNLOCK(sc); 1897 } 1898 1899 vtscsi_enqueue_request(sc, req); 1900 } 1901 1902 static void 1903 vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq) 1904 { 1905 struct vtscsi_request *req; 1906 int last; 1907 1908 last = 0; 1909 1910 vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq); 1911 1912 while ((req = virtqueue_drain(vq, &last)) != NULL) 1913 vtscsi_cancel_request(sc, req); 1914 1915 KASSERT(virtqueue_empty(vq), ("virtqueue not empty")); 1916 } 1917 1918 static void 1919 vtscsi_drain_vqs(struct vtscsi_softc *sc) 1920 { 1921 1922 if (sc->vtscsi_control_vq != NULL) 1923 vtscsi_drain_vq(sc, sc->vtscsi_control_vq); 1924 if (sc->vtscsi_request_vq != NULL) 1925 vtscsi_drain_vq(sc, sc->vtscsi_request_vq); 1926 if (sc->vtscsi_event_vq != NULL) 1927 vtscsi_drain_event_vq(sc); 1928 } 1929 1930 static void 1931 vtscsi_stop(struct vtscsi_softc *sc) 1932 { 1933 1934 vtscsi_disable_vqs_intr(sc); 1935 virtio_stop(sc->vtscsi_dev); 1936 } 1937 1938 static int 1939 vtscsi_reset_bus(struct vtscsi_softc *sc) 1940 { 1941 int error; 1942 1943 VTSCSI_LOCK_OWNED(sc); 1944 1945 if (vtscsi_bus_reset_disable != 0) { 1946 device_printf(sc->vtscsi_dev, "bus reset disabled\n"); 1947 return (0); 1948 } 1949 1950 sc->vtscsi_flags |= VTSCSI_FLAG_RESET; 1951 1952 /* 1953 * vtscsi_stop() will cause the in-flight requests to be canceled. 1954 * Those requests are then completed here so CAM will retry them 1955 * after the reset is complete. 1956 */ 1957 vtscsi_stop(sc); 1958 vtscsi_complete_vqs_locked(sc); 1959 1960 /* Rid the virtqueues of any remaining requests. */ 1961 vtscsi_drain_vqs(sc); 1962 1963 /* 1964 * Any resource shortage that froze the SIMQ cannot persist across 1965 * a bus reset so ensure it gets thawed here. 1966 */ 1967 if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0) 1968 xpt_release_simq(sc->vtscsi_sim, 0); 1969 1970 error = vtscsi_reinit(sc); 1971 if (error) { 1972 device_printf(sc->vtscsi_dev, 1973 "reinitialization failed, stopping device...\n"); 1974 vtscsi_stop(sc); 1975 } else 1976 vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, 1977 CAM_LUN_WILDCARD); 1978 1979 sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET; 1980 1981 return (error); 1982 } 1983 1984 static void 1985 vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req) 1986 { 1987 1988 #ifdef INVARIANTS 1989 int req_nsegs, resp_nsegs; 1990 1991 req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq)); 1992 resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp)); 1993 1994 KASSERT(req_nsegs == 1, ("request crossed page boundary")); 1995 KASSERT(resp_nsegs == 1, ("response crossed page boundary")); 1996 #endif 1997 1998 req->vsr_softc = sc; 1999 callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0); 2000 } 2001 2002 static int 2003 vtscsi_alloc_requests(struct vtscsi_softc *sc) 2004 { 2005 struct vtscsi_request *req; 2006 int i, nreqs; 2007 2008 /* 2009 * Commands destined for either the request or control queues come 2010 * from the same SIM queue. Use the size of the request virtqueue 2011 * as it (should) be much more frequently used. Some additional 2012 * requests are allocated for internal (TMF) use. 2013 */ 2014 nreqs = virtqueue_size(sc->vtscsi_request_vq); 2015 if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) 2016 nreqs /= VTSCSI_MIN_SEGMENTS; 2017 nreqs += VTSCSI_RESERVED_REQUESTS; 2018 2019 for (i = 0; i < nreqs; i++) { 2020 req = malloc(sizeof(struct vtscsi_request), M_DEVBUF, 2021 M_NOWAIT); 2022 if (req == NULL) 2023 return (ENOMEM); 2024 2025 vtscsi_init_request(sc, req); 2026 2027 sc->vtscsi_nrequests++; 2028 vtscsi_enqueue_request(sc, req); 2029 } 2030 2031 return (0); 2032 } 2033 2034 static void 2035 vtscsi_free_requests(struct vtscsi_softc *sc) 2036 { 2037 struct vtscsi_request *req; 2038 2039 while ((req = vtscsi_dequeue_request(sc)) != NULL) { 2040 KASSERT(callout_active(&req->vsr_callout) == 0, 2041 ("request callout still active")); 2042 2043 sc->vtscsi_nrequests--; 2044 free(req, M_DEVBUF); 2045 } 2046 2047 KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d", 2048 sc->vtscsi_nrequests)); 2049 } 2050 2051 static void 2052 vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req) 2053 { 2054 2055 KASSERT(req->vsr_softc == sc, 2056 ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc)); 2057 2058 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req); 2059 2060 /* A request is available so the SIMQ could be released. */ 2061 if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0) 2062 xpt_release_simq(sc->vtscsi_sim, 1); 2063 2064 req->vsr_ccb = NULL; 2065 req->vsr_complete = NULL; 2066 req->vsr_ptr0 = NULL; 2067 req->vsr_state = VTSCSI_REQ_STATE_FREE; 2068 req->vsr_flags = 0; 2069 2070 bzero(&req->vsr_ureq, sizeof(req->vsr_ureq)); 2071 bzero(&req->vsr_uresp, sizeof(req->vsr_uresp)); 2072 2073 /* 2074 * We insert at the tail of the queue in order to make it 2075 * very unlikely a request will be reused if we race with 2076 * stopping its callout handler. 2077 */ 2078 TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link); 2079 } 2080 2081 static struct vtscsi_request * 2082 vtscsi_dequeue_request(struct vtscsi_softc *sc) 2083 { 2084 struct vtscsi_request *req; 2085 2086 req = TAILQ_FIRST(&sc->vtscsi_req_free); 2087 if (req != NULL) { 2088 req->vsr_state = VTSCSI_REQ_STATE_INUSE; 2089 TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link); 2090 } else 2091 sc->vtscsi_stats.dequeue_no_requests++; 2092 2093 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req); 2094 2095 return (req); 2096 } 2097 2098 static void 2099 vtscsi_complete_request(struct vtscsi_request *req) 2100 { 2101 2102 if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED) 2103 req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE; 2104 2105 if (req->vsr_complete != NULL) 2106 req->vsr_complete(req->vsr_softc, req); 2107 } 2108 2109 static void 2110 vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq) 2111 { 2112 struct vtscsi_request *req; 2113 2114 VTSCSI_LOCK_OWNED(sc); 2115 2116 while ((req = virtqueue_dequeue(vq, NULL)) != NULL) 2117 vtscsi_complete_request(req); 2118 } 2119 2120 static void 2121 vtscsi_control_vq_intr(void *xsc) 2122 { 2123 struct vtscsi_softc *sc; 2124 struct virtqueue *vq; 2125 2126 sc = xsc; 2127 vq = sc->vtscsi_control_vq; 2128 2129 again: 2130 VTSCSI_LOCK(sc); 2131 2132 vtscsi_complete_vq(sc, sc->vtscsi_control_vq); 2133 2134 if (virtqueue_enable_intr(vq) != 0) { 2135 virtqueue_disable_intr(vq); 2136 VTSCSI_UNLOCK(sc); 2137 goto again; 2138 } 2139 2140 VTSCSI_UNLOCK(sc); 2141 } 2142 2143 static void 2144 vtscsi_event_vq_intr(void *xsc) 2145 { 2146 struct vtscsi_softc *sc; 2147 struct virtqueue *vq; 2148 struct virtio_scsi_event *event; 2149 2150 sc = xsc; 2151 vq = sc->vtscsi_event_vq; 2152 2153 again: 2154 VTSCSI_LOCK(sc); 2155 2156 while ((event = virtqueue_dequeue(vq, NULL)) != NULL) 2157 vtscsi_handle_event(sc, event); 2158 2159 if (virtqueue_enable_intr(vq) != 0) { 2160 virtqueue_disable_intr(vq); 2161 VTSCSI_UNLOCK(sc); 2162 goto again; 2163 } 2164 2165 VTSCSI_UNLOCK(sc); 2166 } 2167 2168 static void 2169 vtscsi_request_vq_intr(void *xsc) 2170 { 2171 struct vtscsi_softc *sc; 2172 struct virtqueue *vq; 2173 2174 sc = xsc; 2175 vq = sc->vtscsi_request_vq; 2176 2177 again: 2178 VTSCSI_LOCK(sc); 2179 2180 vtscsi_complete_vq(sc, sc->vtscsi_request_vq); 2181 2182 if (virtqueue_enable_intr(vq) != 0) { 2183 virtqueue_disable_intr(vq); 2184 VTSCSI_UNLOCK(sc); 2185 goto again; 2186 } 2187 2188 VTSCSI_UNLOCK(sc); 2189 } 2190 2191 static void 2192 vtscsi_disable_vqs_intr(struct vtscsi_softc *sc) 2193 { 2194 2195 virtqueue_disable_intr(sc->vtscsi_control_vq); 2196 virtqueue_disable_intr(sc->vtscsi_event_vq); 2197 virtqueue_disable_intr(sc->vtscsi_request_vq); 2198 } 2199 2200 static void 2201 vtscsi_enable_vqs_intr(struct vtscsi_softc *sc) 2202 { 2203 2204 virtqueue_enable_intr(sc->vtscsi_control_vq); 2205 virtqueue_enable_intr(sc->vtscsi_event_vq); 2206 virtqueue_enable_intr(sc->vtscsi_request_vq); 2207 } 2208 2209 static void 2210 vtscsi_get_tunables(struct vtscsi_softc *sc) 2211 { 2212 char tmpstr[64]; 2213 2214 TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug); 2215 2216 snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level", 2217 device_get_unit(sc->vtscsi_dev)); 2218 TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug); 2219 } 2220 2221 static void 2222 vtscsi_add_sysctl(struct vtscsi_softc *sc) 2223 { 2224 device_t dev; 2225 struct vtscsi_statistics *stats; 2226 struct sysctl_ctx_list *ctx; 2227 struct sysctl_oid *tree; 2228 struct sysctl_oid_list *child; 2229 2230 dev = sc->vtscsi_dev; 2231 stats = &sc->vtscsi_stats; 2232 ctx = device_get_sysctl_ctx(dev); 2233 tree = device_get_sysctl_tree(dev); 2234 child = SYSCTL_CHILDREN(tree); 2235 2236 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level", 2237 CTLFLAG_RW, &sc->vtscsi_debug, 0, 2238 "Debug level"); 2239 2240 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts", 2241 CTLFLAG_RD, &stats->scsi_cmd_timeouts, 2242 "SCSI command timeouts"); 2243 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests", 2244 CTLFLAG_RD, &stats->dequeue_no_requests, 2245 "No available requests to dequeue"); 2246 } 2247 2248 static void 2249 vtscsi_printf_req(struct vtscsi_request *req, const char *func, 2250 const char *fmt, ...) 2251 { 2252 struct vtscsi_softc *sc; 2253 union ccb *ccb; 2254 struct sbuf sb; 2255 va_list ap; 2256 char str[192]; 2257 char path_str[64]; 2258 2259 if (req == NULL) 2260 return; 2261 2262 sc = req->vsr_softc; 2263 ccb = req->vsr_ccb; 2264 2265 va_start(ap, fmt); 2266 sbuf_new(&sb, str, sizeof(str), 0); 2267 2268 if (ccb == NULL) { 2269 sbuf_printf(&sb, "(noperiph:%s%d:%u): ", 2270 cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim), 2271 cam_sim_bus(sc->vtscsi_sim)); 2272 } else { 2273 xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str)); 2274 sbuf_cat(&sb, path_str); 2275 if (ccb->ccb_h.func_code == XPT_SCSI_IO) { 2276 scsi_command_string(&ccb->csio, &sb); 2277 sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len); 2278 } 2279 } 2280 2281 sbuf_vprintf(&sb, fmt, ap); 2282 va_end(ap); 2283 2284 sbuf_finish(&sb); 2285 printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func, 2286 sbuf_data(&sb)); 2287 } 2288