/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO SCSI devices. */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/stdarg.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/scsi/virtio_scsi.h>
#include <dev/virtio/scsi/virtio_scsivar.h>

#include "virtio_if.h"

static int vtscsi_modevent(module_t, int, void *);

static int vtscsi_probe(device_t);
static int vtscsi_attach(device_t);
static int vtscsi_detach(device_t);
static int vtscsi_suspend(device_t);
static int vtscsi_resume(device_t);

static int vtscsi_negotiate_features(struct vtscsi_softc *);
static int vtscsi_setup_features(struct vtscsi_softc *);
static void vtscsi_read_config(struct vtscsi_softc *,
            struct virtio_scsi_config *);
static int vtscsi_maximum_segments(struct vtscsi_softc *, int);
static int vtscsi_alloc_virtqueues(struct vtscsi_softc *);
static void vtscsi_check_sizes(struct vtscsi_softc *);
static void vtscsi_write_device_config(struct vtscsi_softc *);
static int vtscsi_reinit(struct vtscsi_softc *);

static int vtscsi_alloc_cam(struct vtscsi_softc *);
static int vtscsi_register_cam(struct vtscsi_softc *);
static void vtscsi_free_cam(struct vtscsi_softc *);
static void vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
static int vtscsi_register_async(struct vtscsi_softc *);
static void vtscsi_deregister_async(struct vtscsi_softc *);
static void vtscsi_cam_action(struct cam_sim *, union ccb *);
static void vtscsi_cam_poll(struct cam_sim *);

static void vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
            union ccb *);
static void vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
            union ccb *);
static void vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
static void vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
static void vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
static void vtscsi_cam_path_inquiry(struct vtscsi_softc *,
            struct cam_sim *, union ccb *);

static int vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
            struct sglist *, struct ccb_scsiio *);
static int vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
            struct vtscsi_request *, int *, int *);
static int vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
            struct vtscsi_request *);
static int vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
static void vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
            struct vtscsi_request *);
static int vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
            struct vtscsi_request *);
static void vtscsi_timedout_scsi_cmd(void *);
static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
            struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
static void vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
            struct vtscsi_request *);

static void vtscsi_poll_ctrl_req(struct vtscsi_softc *,
            struct vtscsi_request *);
static int vtscsi_execute_ctrl_req(struct vtscsi_softc *,
            struct vtscsi_request *, struct sglist *, int, int, int);
static void vtscsi_complete_abort_task_cmd(struct vtscsi_softc *,
            struct vtscsi_request *);
static int vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
            struct vtscsi_request *);
static int vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
            struct vtscsi_request *);

static void vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
static void vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
static void vtscsi_init_scsi_cmd_req(struct vtscsi_softc *,
            struct ccb_scsiio *, struct virtio_scsi_cmd_req *);
static void vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *, struct ccb_hdr *,
            uint32_t, uintptr_t, struct virtio_scsi_ctrl_tmf_req *);

static void vtscsi_freeze_simq(struct vtscsi_softc *, int);
static int vtscsi_thaw_simq(struct vtscsi_softc *, int);

static void vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
            lun_id_t);
static void vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
            lun_id_t);
static void vtscsi_execute_rescan_bus(struct vtscsi_softc *);

static void vtscsi_handle_event(struct vtscsi_softc *,
            struct virtio_scsi_event *);
static int vtscsi_enqueue_event_buf(struct vtscsi_softc *,
            struct virtio_scsi_event *);
static int vtscsi_init_event_vq(struct vtscsi_softc *);
static void vtscsi_reinit_event_vq(struct vtscsi_softc *);
static void vtscsi_drain_event_vq(struct vtscsi_softc *);

static void vtscsi_complete_vqs_locked(struct vtscsi_softc *);
static void vtscsi_complete_vqs(struct vtscsi_softc *);
static void vtscsi_drain_vqs(struct vtscsi_softc *);
static void vtscsi_cancel_request(struct vtscsi_softc *,
            struct vtscsi_request *);
static void vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
static void vtscsi_stop(struct vtscsi_softc *);
static int vtscsi_reset_bus(struct vtscsi_softc *);

static void vtscsi_init_request(struct vtscsi_softc *,
            struct vtscsi_request *);
static int vtscsi_alloc_requests(struct vtscsi_softc *);
static void vtscsi_free_requests(struct vtscsi_softc *);
static void vtscsi_enqueue_request(struct vtscsi_softc *,
            struct vtscsi_request *);
static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);

static void vtscsi_complete_request(struct vtscsi_request *);
static void vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);

static void vtscsi_control_vq_intr(void *);
static void vtscsi_event_vq_intr(void *);
static void vtscsi_request_vq_intr(void *);
static void vtscsi_disable_vqs_intr(struct vtscsi_softc *);
static void vtscsi_enable_vqs_intr(struct vtscsi_softc *);

static void vtscsi_get_tunables(struct vtscsi_softc *);
static void vtscsi_setup_sysctl(struct vtscsi_softc *);

static void vtscsi_printf_req(struct vtscsi_request *, const char *,
            const char *, ...);

#define vtscsi_modern(_sc) (((_sc)->vtscsi_features & VIRTIO_F_VERSION_1) != 0)
#define vtscsi_htog16(_sc, _val) virtio_htog16(vtscsi_modern(_sc), _val)
#define vtscsi_htog32(_sc, _val) virtio_htog32(vtscsi_modern(_sc), _val)
#define vtscsi_htog64(_sc, _val) virtio_htog64(vtscsi_modern(_sc), _val)
#define vtscsi_gtoh16(_sc, _val) virtio_gtoh16(vtscsi_modern(_sc), _val)
#define vtscsi_gtoh32(_sc, _val) virtio_gtoh32(vtscsi_modern(_sc), _val)
#define vtscsi_gtoh64(_sc, _val) virtio_gtoh64(vtscsi_modern(_sc), _val)

/* Global tunables. */
/*
 * The current QEMU VirtIO SCSI implementation does not cancel in-flight
 * IO during virtio_stop(). So in-flight requests still complete after the
 * device reset. We would have to wait for all the in-flight IO to complete,
 * which defeats the typical purpose of a bus reset. We could simulate the
 * bus reset with either I_T_NEXUS_RESET of all the targets, or with
 * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
 * control virtqueue). But this isn't very useful if things really go off
 * the rails, so default to disabled for now.
 */
static int vtscsi_bus_reset_disable = 1;
TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);
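
/*
 * Example: since this is a loader tunable, the simulated bus reset can
 * be re-enabled at boot time from /boot/loader.conf:
 *
 *	hw.vtscsi.bus_reset_disable="0"
 */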

static struct virtio_feature_desc vtscsi_feature_desc[] = {
        { VIRTIO_SCSI_F_INOUT,   "InOut"       },
        { VIRTIO_SCSI_F_HOTPLUG, "Hotplug"     },
        { VIRTIO_SCSI_F_CHANGE,  "ChangeEvent" },
        { VIRTIO_SCSI_F_T10_PI,  "T10PI"       },

        { 0, NULL }
};

static device_method_t vtscsi_methods[] = {
        /* Device methods. */
        DEVMETHOD(device_probe,   vtscsi_probe),
        DEVMETHOD(device_attach,  vtscsi_attach),
        DEVMETHOD(device_detach,  vtscsi_detach),
        DEVMETHOD(device_suspend, vtscsi_suspend),
        DEVMETHOD(device_resume,  vtscsi_resume),

        DEVMETHOD_END
};

static driver_t vtscsi_driver = {
        "vtscsi",
        vtscsi_methods,
        sizeof(struct vtscsi_softc)
};

VIRTIO_DRIVER_MODULE(virtio_scsi, vtscsi_driver, vtscsi_modevent, NULL);
MODULE_VERSION(virtio_scsi, 1);
MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);

VIRTIO_SIMPLE_PNPINFO(virtio_scsi, VIRTIO_ID_SCSI, "VirtIO SCSI Adapter");

static int
vtscsi_modevent(module_t mod, int type, void *unused)
{
        int error;

        switch (type) {
        case MOD_LOAD:
        case MOD_QUIESCE:
        case MOD_UNLOAD:
        case MOD_SHUTDOWN:
                error = 0;
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }

        return (error);
}

static int
vtscsi_probe(device_t dev)
{
        return (VIRTIO_SIMPLE_PROBE(dev, virtio_scsi));
}

static int
vtscsi_attach(device_t dev)
{
        struct vtscsi_softc *sc;
        struct virtio_scsi_config scsicfg;
        int error;

        sc = device_get_softc(dev);
        sc->vtscsi_dev = dev;
        virtio_set_feature_desc(dev, vtscsi_feature_desc);

        VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
        TAILQ_INIT(&sc->vtscsi_req_free);

        vtscsi_get_tunables(sc);
        vtscsi_setup_sysctl(sc);

        error = vtscsi_setup_features(sc);
        if (error) {
                device_printf(dev, "cannot setup features\n");
                goto fail;
        }

        vtscsi_read_config(sc, &scsicfg);

        sc->vtscsi_max_channel = scsicfg.max_channel;
        sc->vtscsi_max_target = scsicfg.max_target;
        sc->vtscsi_max_lun = scsicfg.max_lun;
        sc->vtscsi_event_buf_size = scsicfg.event_info_size;

        vtscsi_write_device_config(sc);

        sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
        sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
        if (sc->vtscsi_sglist == NULL) {
                error = ENOMEM;
                device_printf(dev, "cannot allocate sglist\n");
                goto fail;
        }

        error = vtscsi_alloc_virtqueues(sc);
        if (error) {
                device_printf(dev, "cannot allocate virtqueues\n");
                goto fail;
        }

        vtscsi_check_sizes(sc);

        error = vtscsi_init_event_vq(sc);
        if (error) {
                device_printf(dev, "cannot populate the eventvq\n");
                goto fail;
        }

        error = vtscsi_alloc_requests(sc);
        if (error) {
                device_printf(dev, "cannot allocate requests\n");
                goto fail;
        }

        error = vtscsi_alloc_cam(sc);
        if (error) {
                device_printf(dev, "cannot allocate CAM structures\n");
                goto fail;
        }

        error = virtio_setup_intr(dev, INTR_TYPE_CAM);
        if (error) {
                device_printf(dev, "cannot setup virtqueue interrupts\n");
                goto fail;
        }

        vtscsi_enable_vqs_intr(sc);

        /*
         * Register with CAM after interrupts are enabled so we will get
         * notified of the probe responses.
         */
        error = vtscsi_register_cam(sc);
        if (error) {
                device_printf(dev, "cannot register with CAM\n");
                goto fail;
        }

fail:
        if (error)
                vtscsi_detach(dev);

        return (error);
}

static int
vtscsi_detach(device_t dev)
{
        struct vtscsi_softc *sc;

        sc = device_get_softc(dev);

        VTSCSI_LOCK(sc);
        sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
        if (device_is_attached(dev))
                vtscsi_stop(sc);
        VTSCSI_UNLOCK(sc);

        vtscsi_complete_vqs(sc);
        vtscsi_drain_vqs(sc);

        vtscsi_free_cam(sc);
        vtscsi_free_requests(sc);

        if (sc->vtscsi_sglist != NULL) {
                sglist_free(sc->vtscsi_sglist);
                sc->vtscsi_sglist = NULL;
        }

        VTSCSI_LOCK_DESTROY(sc);

        return (0);
}

static int
vtscsi_suspend(device_t dev)
{

        return (0);
}

static int
vtscsi_resume(device_t dev)
{

        return (0);
}

static int
vtscsi_negotiate_features(struct vtscsi_softc *sc)
{
        device_t dev;
        uint64_t features;

        dev = sc->vtscsi_dev;
        features = VTSCSI_FEATURES;

        sc->vtscsi_features = virtio_negotiate_features(dev, features);
        return (virtio_finalize_features(dev));
}

static int
vtscsi_setup_features(struct vtscsi_softc *sc)
{
        device_t dev;
        int error;

        dev = sc->vtscsi_dev;

        error = vtscsi_negotiate_features(sc);
        if (error)
                return (error);

        if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
                sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
        if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
                sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
        if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
                sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;

        return (0);
}

#define VTSCSI_GET_CONFIG(_dev, _field, _cfg)                   \
        virtio_read_device_config(_dev,                         \
            offsetof(struct virtio_scsi_config, _field),        \
            &(_cfg)->_field, sizeof((_cfg)->_field))

static void
vtscsi_read_config(struct vtscsi_softc *sc,
    struct virtio_scsi_config *scsicfg)
{
        device_t dev;

        dev = sc->vtscsi_dev;

        bzero(scsicfg, sizeof(struct virtio_scsi_config));

        VTSCSI_GET_CONFIG(dev, num_queues, scsicfg);
        VTSCSI_GET_CONFIG(dev, seg_max, scsicfg);
        VTSCSI_GET_CONFIG(dev, max_sectors, scsicfg);
        VTSCSI_GET_CONFIG(dev, cmd_per_lun, scsicfg);
        VTSCSI_GET_CONFIG(dev, event_info_size, scsicfg);
        VTSCSI_GET_CONFIG(dev, sense_size, scsicfg);
        VTSCSI_GET_CONFIG(dev, cdb_size, scsicfg);
        VTSCSI_GET_CONFIG(dev, max_channel, scsicfg);
        VTSCSI_GET_CONFIG(dev, max_target, scsicfg);
        VTSCSI_GET_CONFIG(dev, max_lun, scsicfg);
}

#undef VTSCSI_GET_CONFIG

static int
vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
{
        int nsegs;

        nsegs = VTSCSI_MIN_SEGMENTS;

        if (seg_max > 0) {
                nsegs += MIN(seg_max, maxphys / PAGE_SIZE + 1);
                if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
                        nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
        } else
                nsegs += 1;

        return (nsegs);
}
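
/*
 * Worked example for the computation above (values are platform
 * dependent; assuming the common maxphys = 1 MB and PAGE_SIZE = 4 KB):
 * a maxphys-sized transfer can touch at most 1 MB / 4 KB + 1 = 257
 * pages when misaligned, so with a large enough seg_max the result is
 * VTSCSI_MIN_SEGMENTS + 257 segments, where the VTSCSI_MIN_SEGMENTS
 * extra entries cover the request and response headers that bracket
 * the data in every command.
 */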

static int
vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
{
        device_t dev;
        struct vq_alloc_info vq_info[3];
        int nvqs;

        dev = sc->vtscsi_dev;
        nvqs = 3;

        VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
            &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));

        VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
            &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));

        VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
            vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
            "%s request", device_get_nameunit(dev));

        return (virtio_alloc_virtqueues(dev, nvqs, vq_info));
}

static void
vtscsi_check_sizes(struct vtscsi_softc *sc)
{
        int rqsize;

        if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) {
                /*
                 * Ensure the assertions in virtqueue_enqueue() hold,
                 * even if the hypervisor reports a bad seg_max.
                 */
                rqsize = virtqueue_size(sc->vtscsi_request_vq);
                if (sc->vtscsi_max_nsegs > rqsize) {
                        device_printf(sc->vtscsi_dev,
                            "clamping seg_max (%d %d)\n", sc->vtscsi_max_nsegs,
                            rqsize);
                        sc->vtscsi_max_nsegs = rqsize;
                }
        }
}

static void
vtscsi_write_device_config(struct vtscsi_softc *sc)
{

        virtio_write_dev_config_4(sc->vtscsi_dev,
            offsetof(struct virtio_scsi_config, sense_size),
            VIRTIO_SCSI_SENSE_SIZE);

        /*
         * This is the size in the virtio_scsi_cmd_req structure. Note
         * this value (32) is larger than the maximum CAM CDB size (16).
         */
        virtio_write_dev_config_4(sc->vtscsi_dev,
            offsetof(struct virtio_scsi_config, cdb_size),
            VIRTIO_SCSI_CDB_SIZE);
}

static int
vtscsi_reinit(struct vtscsi_softc *sc)
{
        device_t dev;
        int error;

        dev = sc->vtscsi_dev;

        error = virtio_reinit(dev, sc->vtscsi_features);
        if (error == 0) {
                vtscsi_write_device_config(sc);
                virtio_reinit_complete(dev);
                vtscsi_reinit_event_vq(sc);

                vtscsi_enable_vqs_intr(sc);
        }

        vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);

        return (error);
}

static int
vtscsi_alloc_cam(struct vtscsi_softc *sc)
{
        device_t dev;
        struct cam_devq *devq;
        int openings;

        dev = sc->vtscsi_dev;
        openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;

        devq = cam_simq_alloc(openings);
        if (devq == NULL) {
                device_printf(dev, "cannot allocate SIM queue\n");
                return (ENOMEM);
        }

        sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
            "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
            openings, devq);
        if (sc->vtscsi_sim == NULL) {
                cam_simq_free(devq);
                device_printf(dev, "cannot allocate SIM\n");
                return (ENOMEM);
        }

        return (0);
}

static int
vtscsi_register_cam(struct vtscsi_softc *sc)
{
        device_t dev;
        int registered, error;

        dev = sc->vtscsi_dev;
        registered = 0;

        VTSCSI_LOCK(sc);

        if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) {
                error = ENOMEM;
                device_printf(dev, "cannot register XPT bus\n");
                goto fail;
        }

        registered = 1;

        if (xpt_create_path(&sc->vtscsi_path, NULL,
            cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
            CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
                error = ENOMEM;
                device_printf(dev, "cannot create bus path\n");
                goto fail;
        }

        if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
                error = EIO;
                device_printf(dev, "cannot register async callback\n");
                goto fail;
        }

        VTSCSI_UNLOCK(sc);

        return (0);

fail:
        if (sc->vtscsi_path != NULL) {
                xpt_free_path(sc->vtscsi_path);
                sc->vtscsi_path = NULL;
        }

        if (registered != 0)
                xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));

        VTSCSI_UNLOCK(sc);

        return (error);
}

static void
vtscsi_free_cam(struct vtscsi_softc *sc)
{

        VTSCSI_LOCK(sc);

        if (sc->vtscsi_path != NULL) {
                vtscsi_deregister_async(sc);

                xpt_free_path(sc->vtscsi_path);
                sc->vtscsi_path = NULL;

                xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
        }

        if (sc->vtscsi_sim != NULL) {
                cam_sim_free(sc->vtscsi_sim, 1);
                sc->vtscsi_sim = NULL;
        }

        VTSCSI_UNLOCK(sc);
}

static void
vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
{
        struct cam_sim *sim;
        struct vtscsi_softc *sc;

        sim = cb_arg;
        sc = cam_sim_softc(sim);

        vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);

        /*
         * TODO Once QEMU supports event reporting, we should
         * (un)subscribe to events here.
         */
        switch (code) {
        case AC_FOUND_DEVICE:
                break;
        case AC_LOST_DEVICE:
                break;
        }
}

static int
vtscsi_register_async(struct vtscsi_softc *sc)
{
        struct ccb_setasync csa;

        memset(&csa, 0, sizeof(csa));
        xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
        csa.ccb_h.func_code = XPT_SASYNC_CB;
        csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
        csa.callback = vtscsi_cam_async;
        csa.callback_arg = sc->vtscsi_sim;

        xpt_action((union ccb *) &csa);

        return (csa.ccb_h.status);
}

static void
vtscsi_deregister_async(struct vtscsi_softc *sc)
{
        struct ccb_setasync csa;

        memset(&csa, 0, sizeof(csa));
        xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
        csa.ccb_h.func_code = XPT_SASYNC_CB;
        csa.event_enable = 0;
        csa.callback = vtscsi_cam_async;
        csa.callback_arg = sc->vtscsi_sim;

        xpt_action((union ccb *) &csa);
}

static void
vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
        struct vtscsi_softc *sc;
        struct ccb_hdr *ccbh;

        sc = cam_sim_softc(sim);
        ccbh = &ccb->ccb_h;

        VTSCSI_LOCK_OWNED(sc);

        if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
                /*
                 * The VTSCSI_MTX is briefly dropped between setting
                 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
                 * drop any CCBs that come in during that window.
                 */
                ccbh->status = CAM_NO_HBA;
                xpt_done(ccb);
                return;
        }

        switch (ccbh->func_code) {
        case XPT_SCSI_IO:
                vtscsi_cam_scsi_io(sc, sim, ccb);
                break;

        case XPT_SET_TRAN_SETTINGS:
                ccbh->status = CAM_FUNC_NOTAVAIL;
                xpt_done(ccb);
                break;

        case XPT_GET_TRAN_SETTINGS:
                vtscsi_cam_get_tran_settings(sc, ccb);
                break;

        case XPT_RESET_BUS:
                vtscsi_cam_reset_bus(sc, ccb);
                break;

        case XPT_RESET_DEV:
                vtscsi_cam_reset_dev(sc, ccb);
                break;

        case XPT_ABORT:
                vtscsi_cam_abort(sc, ccb);
                break;

        case XPT_CALC_GEOMETRY:
                cam_calc_geometry(&ccb->ccg, 1);
                xpt_done(ccb);
                break;

        case XPT_PATH_INQ:
                vtscsi_cam_path_inquiry(sc, sim, ccb);
                break;

        default:
                vtscsi_dprintf(sc, VTSCSI_ERROR,
                    "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);

                ccbh->status = CAM_REQ_INVALID;
                xpt_done(ccb);
                break;
        }
}

static void
vtscsi_cam_poll(struct cam_sim *sim)
{
        struct vtscsi_softc *sc;

        sc = cam_sim_softc(sim);

        vtscsi_complete_vqs_locked(sc);
}

static void
vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
        struct ccb_hdr *ccbh;
        struct ccb_scsiio *csio;
        int error;

        ccbh = &ccb->ccb_h;
        csio = &ccb->csio;

        if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
                error = EINVAL;
                ccbh->status = CAM_REQ_INVALID;
                goto done;
        }

        if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
            (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
                error = EINVAL;
                ccbh->status = CAM_REQ_INVALID;
                goto done;
        }

        error = vtscsi_start_scsi_cmd(sc, ccb);

done:
        if (error) {
                vtscsi_dprintf(sc, VTSCSI_ERROR,
                    "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
                xpt_done(ccb);
        }
}

static void
vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
{
        struct ccb_trans_settings *cts;
        struct ccb_trans_settings_scsi *scsi;

        cts = &ccb->cts;
        scsi = &cts->proto_specific.scsi;

        cts->protocol = PROTO_SCSI;
        cts->protocol_version = SCSI_REV_SPC3;
        cts->transport = XPORT_SAS;
        cts->transport_version = 0;

        scsi->valid = CTS_SCSI_VALID_TQ;
        scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

        ccb->ccb_h.status = CAM_REQ_CMP;
        xpt_done(ccb);
}

static void
vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
{
        int error;

        error = vtscsi_reset_bus(sc);
        if (error == 0)
                ccb->ccb_h.status = CAM_REQ_CMP;
        else
                ccb->ccb_h.status = CAM_REQ_CMP_ERR;

        vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
            error, ccb, ccb->ccb_h.status);

        xpt_done(ccb);
}

static void
vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
{
        struct ccb_hdr *ccbh;
        struct vtscsi_request *req;
        int error;

        ccbh = &ccb->ccb_h;

        req = vtscsi_dequeue_request(sc);
        if (req == NULL) {
                error = EAGAIN;
                vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
                goto fail;
        }

        req->vsr_ccb = ccb;

        error = vtscsi_execute_reset_dev_cmd(sc, req);
        if (error == 0)
                return;

        vtscsi_enqueue_request(sc, req);

fail:
        vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
            error, req, ccb);

        if (error == EAGAIN)
                ccbh->status = CAM_RESRC_UNAVAIL;
        else
                ccbh->status = CAM_REQ_CMP_ERR;

        xpt_done(ccb);
}

static void
vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
{
        struct vtscsi_request *req;
        struct ccb_hdr *ccbh;
        int error;

        ccbh = &ccb->ccb_h;

        req = vtscsi_dequeue_request(sc);
        if (req == NULL) {
                error = EAGAIN;
                vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
                goto fail;
        }

        req->vsr_ccb = ccb;

        error = vtscsi_execute_abort_task_cmd(sc, req);
        if (error == 0)
                return;

        vtscsi_enqueue_request(sc, req);

fail:
        vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
            error, req, ccb);

        if (error == EAGAIN)
                ccbh->status = CAM_RESRC_UNAVAIL;
        else
                ccbh->status = CAM_REQ_CMP_ERR;

        xpt_done(ccb);
}

static void
vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
        device_t dev;
        struct ccb_pathinq *cpi;

        dev = sc->vtscsi_dev;
        cpi = &ccb->cpi;

        vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);

        cpi->version_num = 1;
        cpi->hba_inquiry = PI_TAG_ABLE;
        cpi->target_sprt = 0;
        cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
        if (vtscsi_bus_reset_disable != 0)
                cpi->hba_misc |= PIM_NOBUSRESET;
        cpi->hba_eng_cnt = 0;

        cpi->max_target = sc->vtscsi_max_target;
        cpi->max_lun = sc->vtscsi_max_lun;
        cpi->initiator_id = cpi->max_target + 1;

        strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
        strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
        strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);

        cpi->unit_number = cam_sim_unit(sim);
        cpi->bus_id = cam_sim_bus(sim);

        cpi->base_transfer_speed = 300000;

        cpi->protocol = PROTO_SCSI;
        cpi->protocol_version = SCSI_REV_SPC3;
        cpi->transport = XPORT_SAS;
        cpi->transport_version = 0;

        cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
            PAGE_SIZE;

        cpi->hba_vendor = virtio_get_vendor(dev);
        cpi->hba_device = virtio_get_device(dev);
        cpi->hba_subvendor = virtio_get_subvendor(dev);
        cpi->hba_subdevice = virtio_get_subdevice(dev);

        ccb->ccb_h.status = CAM_REQ_CMP;
        xpt_done(ccb);
}

static int
vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
    struct ccb_scsiio *csio)
{
        struct ccb_hdr *ccbh;
        struct bus_dma_segment *dseg;
        int i, error;

        ccbh = &csio->ccb_h;
        error = 0;

        switch ((ccbh->flags & CAM_DATA_MASK)) {
        case CAM_DATA_VADDR:
                error = sglist_append(sg, csio->data_ptr, csio->dxfer_len);
                break;
        case CAM_DATA_PADDR:
                error = sglist_append_phys(sg,
                    (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len);
                break;
        case CAM_DATA_SG:
                for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
                        dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
                        error = sglist_append(sg,
                            (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len);
                }
                break;
        case CAM_DATA_SG_PADDR:
                for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
                        dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
                        error = sglist_append_phys(sg,
                            (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
                }
                break;
        case CAM_DATA_BIO:
                error = sglist_append_bio(sg, (struct bio *) csio->data_ptr);
                break;
        default:
                error = EINVAL;
                break;
        }

        return (error);
}
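
/*
 * Layout of a SCSI command on the request virtqueue, per the VirtIO
 * specification: the device-readable segments are the
 * virtio_scsi_cmd_req header followed by any data-out, and the
 * device-writable segments are the virtio_scsi_cmd_resp header
 * followed by any data-in. vtscsi_fill_scsi_cmd_sglist() below builds
 * this layout and reports where the readable/writable boundary falls.
 */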

static int
vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
    int *readable, int *writable)
{
        struct sglist *sg;
        struct ccb_hdr *ccbh;
        struct ccb_scsiio *csio;
        struct virtio_scsi_cmd_req *cmd_req;
        struct virtio_scsi_cmd_resp *cmd_resp;
        int error;

        sg = sc->vtscsi_sglist;
        csio = &req->vsr_ccb->csio;
        ccbh = &csio->ccb_h;
        cmd_req = &req->vsr_cmd_req;
        cmd_resp = &req->vsr_cmd_resp;

        sglist_reset(sg);

        sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
        if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
                error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
                /* At least one segment must be left for the response. */
                if (error || sg->sg_nseg == sg->sg_maxseg)
                        goto fail;
        }

        *readable = sg->sg_nseg;

        sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
        if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
                if (error)
                        goto fail;
        }

        *writable = sg->sg_nseg - *readable;

        vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
            "writable=%d\n", req, ccbh, *readable, *writable);

        return (0);

fail:
        /*
         * This should never happen unless maxio was incorrectly set.
         */
        vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);

        vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
            "nseg=%d maxseg=%d\n",
            error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);

        return (EFBIG);
}

static int
vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
        struct sglist *sg;
        struct virtqueue *vq;
        struct ccb_scsiio *csio;
        struct ccb_hdr *ccbh;
        struct virtio_scsi_cmd_req *cmd_req;
        struct virtio_scsi_cmd_resp *cmd_resp;
        int readable, writable, error;

        sg = sc->vtscsi_sglist;
        vq = sc->vtscsi_request_vq;
        csio = &req->vsr_ccb->csio;
        ccbh = &csio->ccb_h;
        cmd_req = &req->vsr_cmd_req;
        cmd_resp = &req->vsr_cmd_resp;

        vtscsi_init_scsi_cmd_req(sc, csio, cmd_req);

        error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
        if (error)
                return (error);

        req->vsr_complete = vtscsi_complete_scsi_cmd;
        cmd_resp->response = -1;

        error = virtqueue_enqueue(vq, req, sg, readable, writable);
        if (error) {
                vtscsi_dprintf(sc, VTSCSI_ERROR,
                    "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);

                ccbh->status = CAM_REQUEUE_REQ;
                vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
                return (error);
        }

        ccbh->status |= CAM_SIM_QUEUED;
        ccbh->ccbh_vtscsi_req = req;

        virtqueue_notify(vq);

        if (ccbh->timeout != CAM_TIME_INFINITY) {
                req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
                callout_reset_sbt(&req->vsr_callout, SBT_1MS * ccbh->timeout,
                    0, vtscsi_timedout_scsi_cmd, req, 0);
        }

        vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
            req, ccbh);

        return (0);
}

static int
vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
{
        struct vtscsi_request *req;
        int error;

        req = vtscsi_dequeue_request(sc);
        if (req == NULL) {
                ccb->ccb_h.status = CAM_REQUEUE_REQ;
                vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
                return (ENOBUFS);
        }

        req->vsr_ccb = ccb;

        error = vtscsi_execute_scsi_cmd(sc, req);
        if (error)
                vtscsi_enqueue_request(sc, req);

        return (error);
}

static void
vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
        struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
        struct vtscsi_request *to_req;
        uint8_t response;

        tmf_resp = &req->vsr_tmf_resp;
        response = tmf_resp->response;
        to_req = req->vsr_timedout_req;

        vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
            req, to_req, response);

        vtscsi_enqueue_request(sc, req);

        /*
         * The timedout request could have completed between when the
         * abort task was sent and when the host processed it.
         */
        if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
                return;

        /* The timedout request was successfully aborted. */
        if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
                return;

        /* Don't bother if the device is going away. */
        if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
                return;

        /* The timedout request will be aborted by the reset. */
        if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
                return;

        vtscsi_reset_bus(sc);
}

static int
vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *to_req)
{
        struct sglist *sg;
        struct ccb_hdr *to_ccbh;
        struct vtscsi_request *req;
        struct virtio_scsi_ctrl_tmf_req *tmf_req;
        struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
        int error;

        sg = sc->vtscsi_sglist;
        to_ccbh = &to_req->vsr_ccb->ccb_h;

        req = vtscsi_dequeue_request(sc);
        if (req == NULL) {
                error = ENOBUFS;
                goto fail;
        }

        tmf_req = &req->vsr_tmf_req;
        tmf_resp = &req->vsr_tmf_resp;

        vtscsi_init_ctrl_tmf_req(sc, to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
            (uintptr_t) to_ccbh, tmf_req);

        sglist_reset(sg);
        sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
        sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

        req->vsr_timedout_req = to_req;
        req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
        tmf_resp->response = -1;

        error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
            VTSCSI_EXECUTE_ASYNC);
        if (error == 0)
                return (0);

        vtscsi_enqueue_request(sc, req);

fail:
        vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
            "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);

        return (error);
}

static void
vtscsi_timedout_scsi_cmd(void *xreq)
{
        struct vtscsi_softc *sc;
        struct vtscsi_request *to_req;

        to_req = xreq;
        sc = to_req->vsr_softc;

        vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
            to_req, to_req->vsr_ccb, to_req->vsr_state);

        /* Don't bother if the device is going away. */
        if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
                return;

        /*
         * Bail if the request is not in use. We likely raced when
         * stopping the callout handler or it has already been aborted.
         */
        if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
            (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
                return;

        /*
         * Complete the request queue in case the timedout request is
         * actually just pending.
         */
        vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
        if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
                return;

        sc->vtscsi_stats.scsi_cmd_timeouts++;
        to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;

        if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
                return;

        vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
        vtscsi_reset_bus(sc);
}

static cam_status
vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
{
        cam_status status;

        switch (cmd_resp->response) {
        case VIRTIO_SCSI_S_OK:
                status = CAM_REQ_CMP;
                break;
        case VIRTIO_SCSI_S_OVERRUN:
                status = CAM_DATA_RUN_ERR;
                break;
        case VIRTIO_SCSI_S_ABORTED:
                status = CAM_REQ_ABORTED;
                break;
        case VIRTIO_SCSI_S_BAD_TARGET:
                status = CAM_SEL_TIMEOUT;
                break;
        case VIRTIO_SCSI_S_RESET:
                status = CAM_SCSI_BUS_RESET;
                break;
        case VIRTIO_SCSI_S_BUSY:
                status = CAM_SCSI_BUSY;
                break;
        case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
        case VIRTIO_SCSI_S_TARGET_FAILURE:
        case VIRTIO_SCSI_S_NEXUS_FAILURE:
                status = CAM_SCSI_IT_NEXUS_LOST;
                break;
        default: /* VIRTIO_SCSI_S_FAILURE */
                status = CAM_REQ_CMP_ERR;
                break;
        }

        return (status);
}

static cam_status
vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
    struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
{
        uint32_t resp_sense_length;
        cam_status status;

        csio->scsi_status = cmd_resp->status;
        csio->resid = vtscsi_htog32(sc, cmd_resp->resid);

        if (csio->scsi_status == SCSI_STATUS_OK)
                status = CAM_REQ_CMP;
        else
                status = CAM_SCSI_STATUS_ERROR;

        resp_sense_length = vtscsi_htog32(sc, cmd_resp->sense_len);

        if (resp_sense_length > 0) {
                status |= CAM_AUTOSNS_VALID;

                if (resp_sense_length < csio->sense_len)
                        csio->sense_resid = csio->sense_len - resp_sense_length;
                else
                        csio->sense_resid = 0;

                memcpy(&csio->sense_data, cmd_resp->sense,
                    csio->sense_len - csio->sense_resid);
        }

        vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
            "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
            csio, csio->scsi_status, csio->resid, csio->sense_resid);

        return (status);
}

static void
vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
        struct ccb_hdr *ccbh;
        struct ccb_scsiio *csio;
        struct virtio_scsi_cmd_resp *cmd_resp;
        cam_status status;

        csio = &req->vsr_ccb->csio;
        ccbh = &csio->ccb_h;
        cmd_resp = &req->vsr_cmd_resp;

        KASSERT(ccbh->ccbh_vtscsi_req == req,
            ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));

        if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
                callout_stop(&req->vsr_callout);

        status = vtscsi_scsi_cmd_cam_status(cmd_resp);
        if (status == CAM_REQ_ABORTED) {
                if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
                        status = CAM_CMD_TIMEOUT;
        } else if (status == CAM_REQ_CMP)
                status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);

        if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
                status |= CAM_DEV_QFRZN;
                xpt_freeze_devq(ccbh->path, 1);
        }

        if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
                status |= CAM_RELEASE_SIMQ;

        vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
            req, ccbh, status);

        ccbh->status = status;
        xpt_done(req->vsr_ccb);
        vtscsi_enqueue_request(sc, req);
}

static void
vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

        /* XXX We probably shouldn't poll forever. */
        req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
        do
                vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
        while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);

        req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
}
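
/*
 * Control virtqueue exchanges (TMF requests) always use a fixed layout:
 * one device-readable segment for the virtio_scsi_ctrl_tmf_req and one
 * device-writable segment for the virtio_scsi_ctrl_tmf_resp, which is
 * why the callers of vtscsi_execute_ctrl_req() pass readable/writable
 * counts of 1/1.
 */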

static int
vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
    struct sglist *sg, int readable, int writable, int flag)
{
        struct virtqueue *vq;
        int error;

        vq = sc->vtscsi_control_vq;

        MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);

        error = virtqueue_enqueue(vq, req, sg, readable, writable);
        if (error) {
                /*
                 * Return EAGAIN when the virtqueue does not have enough
                 * descriptors available.
                 */
                if (error == ENOSPC || error == EMSGSIZE)
                        error = EAGAIN;

                return (error);
        }

        virtqueue_notify(vq);
        if (flag == VTSCSI_EXECUTE_POLL)
                vtscsi_poll_ctrl_req(sc, req);

        return (0);
}

static void
vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
        union ccb *ccb;
        struct ccb_hdr *ccbh;
        struct virtio_scsi_ctrl_tmf_resp *tmf_resp;

        ccb = req->vsr_ccb;
        ccbh = &ccb->ccb_h;
        tmf_resp = &req->vsr_tmf_resp;

        switch (tmf_resp->response) {
        case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
                ccbh->status = CAM_REQ_CMP;
                break;
        case VIRTIO_SCSI_S_FUNCTION_REJECTED:
                ccbh->status = CAM_UA_ABORT;
                break;
        default:
                ccbh->status = CAM_REQ_CMP_ERR;
                break;
        }

        xpt_done(ccb);
        vtscsi_enqueue_request(sc, req);
}

static int
vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
        struct sglist *sg;
        struct ccb_abort *cab;
        struct ccb_hdr *ccbh;
        struct ccb_hdr *abort_ccbh;
        struct vtscsi_request *abort_req;
        struct virtio_scsi_ctrl_tmf_req *tmf_req;
        struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
        int error;

        sg = sc->vtscsi_sglist;
        cab = &req->vsr_ccb->cab;
        ccbh = &cab->ccb_h;
        tmf_req = &req->vsr_tmf_req;
        tmf_resp = &req->vsr_tmf_resp;

        /* CCB header and request that's to be aborted. */
        abort_ccbh = &cab->abort_ccb->ccb_h;
        abort_req = abort_ccbh->ccbh_vtscsi_req;

        if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
                error = EINVAL;
                goto fail;
        }

        /* Only attempt to abort requests that could be in-flight. */
        if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
                error = EALREADY;
                goto fail;
        }

        abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
        if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
                callout_stop(&abort_req->vsr_callout);

        vtscsi_init_ctrl_tmf_req(sc, ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
            (uintptr_t) abort_ccbh, tmf_req);

        sglist_reset(sg);
        sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
        sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

        req->vsr_complete = vtscsi_complete_abort_task_cmd;
        tmf_resp->response = -1;

        error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
            VTSCSI_EXECUTE_ASYNC);

fail:
        vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
            "abort_req=%p\n", error, req, abort_ccbh, abort_req);

        return (error);
}

static void
vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
        union ccb *ccb;
        struct ccb_hdr *ccbh;
        struct virtio_scsi_ctrl_tmf_resp *tmf_resp;

        ccb = req->vsr_ccb;
        ccbh = &ccb->ccb_h;
        tmf_resp = &req->vsr_tmf_resp;

        vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
            req, ccb, tmf_resp->response);

        if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
                ccbh->status = CAM_REQ_CMP;
                vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
                    ccbh->target_lun);
        } else
                ccbh->status = CAM_REQ_CMP_ERR;

        xpt_done(ccb);
        vtscsi_enqueue_request(sc, req);
}

static int
vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
        struct sglist *sg;
        struct ccb_resetdev *crd;
        struct ccb_hdr *ccbh;
        struct virtio_scsi_ctrl_tmf_req *tmf_req;
        struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
        uint32_t subtype;
        int error;

        sg = sc->vtscsi_sglist;
        crd = &req->vsr_ccb->crd;
        ccbh = &crd->ccb_h;
        tmf_req = &req->vsr_tmf_req;
        tmf_resp = &req->vsr_tmf_resp;

        if (ccbh->target_lun == CAM_LUN_WILDCARD)
                subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
        else
                subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;

        vtscsi_init_ctrl_tmf_req(sc, ccbh, subtype, 0, tmf_req);

        sglist_reset(sg);
        sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
        sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

        req->vsr_complete = vtscsi_complete_reset_dev_cmd;
        tmf_resp->response = -1;

        error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
            VTSCSI_EXECUTE_ASYNC);

        vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
            error, req, ccbh);

        return (error);
}
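
/*
 * VirtIO SCSI addresses commands with an 8-byte LUN field. The
 * single-level format used here (defined by the VirtIO specification)
 * is:
 *
 *	lun[0]    = 1 (bus identifier)
 *	lun[1]    = target
 *	lun[2..3] = 0x4000 | LUN (SAM flat-space addressing)
 *
 * with the remaining bytes zero. The helpers below convert between
 * this encoding and CAM's target_id_t/lun_id_t.
 */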

static void
vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
{

        *target_id = lun[1];
        *lun_id = (lun[2] << 8) | lun[3];
}

static void
vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
{

        lun[0] = 1;
        lun[1] = ccbh->target_id;
        lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
        lun[3] = ccbh->target_lun & 0xFF;
}

static void
vtscsi_init_scsi_cmd_req(struct vtscsi_softc *sc, struct ccb_scsiio *csio,
    struct virtio_scsi_cmd_req *cmd_req)
{
        uint8_t attr;

        switch (csio->tag_action) {
        case MSG_HEAD_OF_Q_TAG:
                attr = VIRTIO_SCSI_S_HEAD;
                break;
        case MSG_ORDERED_Q_TAG:
                attr = VIRTIO_SCSI_S_ORDERED;
                break;
        case MSG_ACA_TASK:
                attr = VIRTIO_SCSI_S_ACA;
                break;
        default: /* MSG_SIMPLE_Q_TAG */
                attr = VIRTIO_SCSI_S_SIMPLE;
                break;
        }

        vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
        cmd_req->tag = vtscsi_gtoh64(sc, (uintptr_t) csio);
        cmd_req->task_attr = attr;

        memcpy(cmd_req->cdb,
            csio->ccb_h.flags & CAM_CDB_POINTER ?
            csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
            csio->cdb_len);
}

static void
vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *sc, struct ccb_hdr *ccbh,
    uint32_t subtype, uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
{

        vtscsi_set_request_lun(ccbh, tmf_req->lun);

        tmf_req->type = vtscsi_gtoh32(sc, VIRTIO_SCSI_T_TMF);
        tmf_req->subtype = vtscsi_gtoh32(sc, subtype);
        tmf_req->tag = vtscsi_gtoh64(sc, tag);
}

static void
vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
{
        int frozen;

        frozen = sc->vtscsi_frozen;

        if (reason & VTSCSI_REQUEST &&
            (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
                sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;

        if (reason & VTSCSI_REQUEST_VQ &&
            (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
                sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;

        /* Freeze the SIMQ if transitioned to frozen. */
        if (frozen == 0 && sc->vtscsi_frozen != 0) {
                vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
                xpt_freeze_simq(sc->vtscsi_sim, 1);
        }
}

static int
vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
{
        int thawed;

        if (sc->vtscsi_frozen == 0 || reason == 0)
                return (0);

        if (reason & VTSCSI_REQUEST &&
            sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
                sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;

        if (reason & VTSCSI_REQUEST_VQ &&
            sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
                sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;

        thawed = sc->vtscsi_frozen == 0;
        if (thawed != 0)
                vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");

        return (thawed);
}
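
/*
 * Summary of the freeze/thaw pairing above: the SIMQ is frozen when the
 * free request pool is exhausted (VTSCSI_REQUEST) or when the request
 * virtqueue has no room (VTSCSI_REQUEST_VQ). A caller that sees
 * vtscsi_thaw_simq() return non-zero is responsible for releasing the
 * SIMQ, either via xpt_release_simq() or by setting CAM_RELEASE_SIMQ on
 * a completing CCB.
 */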

static void
vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
    target_id_t target_id, lun_id_t lun_id)
{
        struct cam_path *path;

        /* Use the wildcard path from our softc for bus announcements. */
        if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
                xpt_async(ac_code, sc->vtscsi_path, NULL);
                return;
        }

        if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
            target_id, lun_id) != CAM_REQ_CMP) {
                vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
                return;
        }

        xpt_async(ac_code, path, NULL);
        xpt_free_path(path);
}

static void
vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
    lun_id_t lun_id)
{
        union ccb *ccb;
        cam_status status;

        ccb = xpt_alloc_ccb_nowait();
        if (ccb == NULL) {
                vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
                return;
        }

        status = xpt_create_path(&ccb->ccb_h.path, NULL,
            cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
        if (status != CAM_REQ_CMP) {
                xpt_free_ccb(ccb);
                return;
        }

        xpt_rescan(ccb);
}

static void
vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
{

        vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
}

static void
vtscsi_transport_reset_event(struct vtscsi_softc *sc,
    struct virtio_scsi_event *event)
{
        target_id_t target_id;
        lun_id_t lun_id;

        vtscsi_get_request_lun(event->lun, &target_id, &lun_id);

        switch (event->reason) {
        case VIRTIO_SCSI_EVT_RESET_RESCAN:
        case VIRTIO_SCSI_EVT_RESET_REMOVED:
                vtscsi_execute_rescan(sc, target_id, lun_id);
                break;
        default:
                device_printf(sc->vtscsi_dev,
                    "unhandled transport event reason: %d\n", event->reason);
                break;
        }
}

static void
vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
{
        int error __diagused;

        if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
                switch (event->event) {
                case VIRTIO_SCSI_T_TRANSPORT_RESET:
                        vtscsi_transport_reset_event(sc, event);
                        break;
                default:
                        device_printf(sc->vtscsi_dev,
                            "unhandled event: %d\n", event->event);
                        break;
                }
        } else
                vtscsi_execute_rescan_bus(sc);

        /*
         * This should always be successful since the buffer
         * was just dequeued.
         */
        error = vtscsi_enqueue_event_buf(sc, event);
        KASSERT(error == 0,
            ("cannot requeue event buffer: %d", error));
}

static int
vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
    struct virtio_scsi_event *event)
{
        struct sglist *sg;
        struct virtqueue *vq;
        int size, error;

        sg = sc->vtscsi_sglist;
        vq = sc->vtscsi_event_vq;
        size = sc->vtscsi_event_buf_size;

        bzero(event, size);

        sglist_reset(sg);
        error = sglist_append(sg, event, size);
        if (error)
                return (error);

        error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
        if (error)
                return (error);

        virtqueue_notify(vq);

        return (0);
}

static int
vtscsi_init_event_vq(struct vtscsi_softc *sc)
{
        struct virtio_scsi_event *event;
        int i, size, error;

        /*
         * The first release of QEMU with VirtIO SCSI support would crash
         * when attempting to notify the event virtqueue. This was fixed
         * when hotplug support was added.
         */
        if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
                size = sc->vtscsi_event_buf_size;
        else
                size = 0;

        if (size < sizeof(struct virtio_scsi_event))
                return (0);

        for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
                event = &sc->vtscsi_event_bufs[i];

                error = vtscsi_enqueue_event_buf(sc, event);
                if (error)
                        break;
        }

        /*
         * Even just one buffer is enough. Missed events are
         * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
         */
        if (i > 0)
                error = 0;

        return (error);
}

static void
vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
{
        struct virtio_scsi_event *event;
        int i, error;

        if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
            sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
                return;

        for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
                event = &sc->vtscsi_event_bufs[i];

                error = vtscsi_enqueue_event_buf(sc, event);
                if (error)
                        break;
        }

        KASSERT(i > 0, ("cannot reinit event vq: %d", error));
}

static void
vtscsi_drain_event_vq(struct vtscsi_softc *sc)
{
        struct virtqueue *vq;
        int last;

        vq = sc->vtscsi_event_vq;
        last = 0;

        while (virtqueue_drain(vq, &last) != NULL)
                ;

        KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
}

static void
vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
{

        VTSCSI_LOCK_OWNED(sc);

        if (sc->vtscsi_request_vq != NULL)
                vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
        if (sc->vtscsi_control_vq != NULL)
                vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
}

static void
vtscsi_complete_vqs(struct vtscsi_softc *sc)
{

        VTSCSI_LOCK(sc);
        vtscsi_complete_vqs_locked(sc);
        VTSCSI_UNLOCK(sc);
}

static void
vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
        union ccb *ccb;
        int detach;

        ccb = req->vsr_ccb;

        vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);

        /*
         * The callout must be drained when detaching since the request is
         * about to be freed. The VTSCSI_MTX must not be held for this in
         * case the callout is pending because there is a deadlock potential.
         * Otherwise, the virtqueue is being drained because of a bus reset
         * so we only need to attempt to stop the callouts.
         */
        detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
        if (detach != 0)
                VTSCSI_LOCK_NOTOWNED(sc);
        else
                VTSCSI_LOCK_OWNED(sc);

        if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
                if (detach != 0)
                        callout_drain(&req->vsr_callout);
                else
                        callout_stop(&req->vsr_callout);
        }

        if (ccb != NULL) {
                if (detach != 0) {
                        VTSCSI_LOCK(sc);
                        ccb->ccb_h.status = CAM_NO_HBA;
                } else
                        ccb->ccb_h.status = CAM_REQUEUE_REQ;
                xpt_done(ccb);
                if (detach != 0)
                        VTSCSI_UNLOCK(sc);
        }

        vtscsi_enqueue_request(sc, req);
}

static void
vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
{
        struct vtscsi_request *req;
        int last;

        last = 0;

        vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);

        while ((req = virtqueue_drain(vq, &last)) != NULL)
                vtscsi_cancel_request(sc, req);

        KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}

static void
vtscsi_drain_vqs(struct vtscsi_softc *sc)
{

        if (sc->vtscsi_control_vq != NULL)
                vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
        if (sc->vtscsi_request_vq != NULL)
                vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
        if (sc->vtscsi_event_vq != NULL)
                vtscsi_drain_event_vq(sc);
}

static void
vtscsi_stop(struct vtscsi_softc *sc)
{

        vtscsi_disable_vqs_intr(sc);
        virtio_stop(sc->vtscsi_dev);
}
static void
vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

#ifdef INVARIANTS
	int req_nsegs, resp_nsegs;

	req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
	resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));

	KASSERT(req_nsegs == 1, ("request crossed page boundary"));
	KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
#endif

	req->vsr_softc = sc;
	callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
}

static int
vtscsi_alloc_requests(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;
	int i, nreqs;

	/*
	 * Commands destined for either the request or control queues come
	 * from the same SIM queue. Use the size of the request virtqueue
	 * as it should be much more frequently used. Some additional
	 * requests are allocated for internal (TMF) use.
	 */
	nreqs = virtqueue_size(sc->vtscsi_request_vq);
	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
		nreqs /= VTSCSI_MIN_SEGMENTS;
	nreqs += VTSCSI_RESERVED_REQUESTS;

	for (i = 0; i < nreqs; i++) {
		req = malloc(sizeof(struct vtscsi_request), M_DEVBUF,
		    M_NOWAIT);
		if (req == NULL)
			return (ENOMEM);

		vtscsi_init_request(sc, req);

		sc->vtscsi_nrequests++;
		vtscsi_enqueue_request(sc, req);
	}

	return (0);
}

static void
vtscsi_free_requests(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;

	while ((req = vtscsi_dequeue_request(sc)) != NULL) {
		KASSERT(callout_active(&req->vsr_callout) == 0,
		    ("request callout still active"));

		sc->vtscsi_nrequests--;
		free(req, M_DEVBUF);
	}

	KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
	    sc->vtscsi_nrequests));
}

static void
vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	KASSERT(req->vsr_softc == sc,
	    ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

	/* A request is available so the SIMQ could be released. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
		xpt_release_simq(sc->vtscsi_sim, 1);

	req->vsr_ccb = NULL;
	req->vsr_complete = NULL;
	req->vsr_ptr0 = NULL;
	req->vsr_state = VTSCSI_REQ_STATE_FREE;
	req->vsr_flags = 0;

	bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
	bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));

	/*
	 * We insert at the tail of the queue in order to make it
	 * very unlikely a request will be reused if we race with
	 * stopping its callout handler.
	 */
	TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
}
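/*
 * The free list pairing: vtscsi_enqueue_request() above inserts at the
 * tail while vtscsi_dequeue_request() below removes from the head, so
 * a just-freed request is recycled as late as possible in case its
 * timeout callout is still being stopped.
 */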
static struct vtscsi_request *
vtscsi_dequeue_request(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;

	req = TAILQ_FIRST(&sc->vtscsi_req_free);
	if (req != NULL) {
		req->vsr_state = VTSCSI_REQ_STATE_INUSE;
		TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
	} else
		sc->vtscsi_stats.dequeue_no_requests++;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

	return (req);
}

static void
vtscsi_complete_request(struct vtscsi_request *req)
{

	if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
		req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;

	if (req->vsr_complete != NULL)
		req->vsr_complete(req->vsr_softc, req);
}

static void
vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
{
	struct vtscsi_request *req;

	VTSCSI_LOCK_OWNED(sc);

	while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
		vtscsi_complete_request(req);
}

static void
vtscsi_control_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;

	sc = xsc;
	vq = sc->vtscsi_control_vq;

again:
	VTSCSI_LOCK(sc);

	vtscsi_complete_vq(sc, sc->vtscsi_control_vq);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

	VTSCSI_UNLOCK(sc);
}

static void
vtscsi_event_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;
	struct virtio_scsi_event *event;

	sc = xsc;
	vq = sc->vtscsi_event_vq;

again:
	VTSCSI_LOCK(sc);

	while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
		vtscsi_handle_event(sc, event);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

	VTSCSI_UNLOCK(sc);
}

static void
vtscsi_request_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;

	sc = xsc;
	vq = sc->vtscsi_request_vq;

again:
	VTSCSI_LOCK(sc);

	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

	VTSCSI_UNLOCK(sc);
}

static void
vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_disable_intr(sc->vtscsi_control_vq);
	virtqueue_disable_intr(sc->vtscsi_event_vq);
	virtqueue_disable_intr(sc->vtscsi_request_vq);
}

static void
vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_enable_intr(sc->vtscsi_control_vq);
	virtqueue_enable_intr(sc->vtscsi_event_vq);
	virtqueue_enable_intr(sc->vtscsi_request_vq);
}

static void
vtscsi_get_tunables(struct vtscsi_softc *sc)
{
	char tmpstr[64];

	TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);

	snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
	    device_get_unit(sc->vtscsi_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
}
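/*
 * The per-device tunable is fetched after the global one so it takes
 * precedence.  For example, in loader.conf (values illustrative):
 *
 *	hw.vtscsi.debug_level="1"	# all vtscsi(4) devices
 *	dev.vtscsi.0.debug_level="2"	# vtscsi0 only
 */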
static void
vtscsi_setup_sysctl(struct vtscsi_softc *sc)
{
	device_t dev;
	struct vtscsi_statistics *stats;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtscsi_dev;
	stats = &sc->vtscsi_stats;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
	    CTLFLAG_RW, &sc->vtscsi_debug, 0,
	    "Debug level");

	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
	    CTLFLAG_RD, &stats->scsi_cmd_timeouts,
	    "SCSI command timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
	    CTLFLAG_RD, &stats->dequeue_no_requests,
	    "No available requests to dequeue");
}

static void
vtscsi_printf_req(struct vtscsi_request *req, const char *func,
    const char *fmt, ...)
{
	struct vtscsi_softc *sc;
	union ccb *ccb;
	struct sbuf sb;
	va_list ap;
	char str[192];

	if (req == NULL)
		return;

	sc = req->vsr_softc;
	ccb = req->vsr_ccb;

	va_start(ap, fmt);
	sbuf_new(&sb, str, sizeof(str), 0);

	if (ccb == NULL) {
		sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
		    cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
		    cam_sim_bus(sc->vtscsi_sim));
	} else {
		xpt_path_sbuf(ccb->ccb_h.path, &sb);
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
		}
	}

	sbuf_vprintf(&sb, fmt, ap);
	va_end(ap);

	sbuf_finish(&sb);
	printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
	    sbuf_data(&sb));
}
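/*
 * Example of a hypothetical call site: this would print the CAM path
 * of the request and, for a SCSI I/O, its CDB, before the message:
 *
 *	vtscsi_printf_req(req, __func__, "timed out\n");
 */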