/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO SCSI devices. */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>
#include <sys/queue.h>
#include <sys/sbuf.h>

#include <machine/stdarg.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/scsi/virtio_scsi.h>
#include <dev/virtio/scsi/virtio_scsivar.h>

#include "virtio_if.h"

static int	vtscsi_modevent(module_t, int, void *);

static int	vtscsi_probe(device_t);
static int	vtscsi_attach(device_t);
static int	vtscsi_detach(device_t);
static int	vtscsi_suspend(device_t);
static int	vtscsi_resume(device_t);

static int	vtscsi_negotiate_features(struct vtscsi_softc *);
static int	vtscsi_setup_features(struct vtscsi_softc *);
static void	vtscsi_read_config(struct vtscsi_softc *,
		    struct virtio_scsi_config *);
static int	vtscsi_maximum_segments(struct vtscsi_softc *, int);
static int	vtscsi_alloc_virtqueues(struct vtscsi_softc *);
static void	vtscsi_check_sizes(struct vtscsi_softc *);
static void	vtscsi_write_device_config(struct vtscsi_softc *);
static int	vtscsi_reinit(struct vtscsi_softc *);

static int	vtscsi_alloc_cam(struct vtscsi_softc *);
static int	vtscsi_register_cam(struct vtscsi_softc *);
static void	vtscsi_free_cam(struct vtscsi_softc *);
static void	vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
static int	vtscsi_register_async(struct vtscsi_softc *);
static void	vtscsi_deregister_async(struct vtscsi_softc *);
static void	vtscsi_cam_action(struct cam_sim *, union ccb *);
static void	vtscsi_cam_poll(struct cam_sim *);

static void	vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
		    union ccb *);
static void	vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
		    union ccb *);
static void	vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
static void	vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
static void	vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
static void	vtscsi_cam_path_inquiry(struct vtscsi_softc *,
		    struct cam_sim *, union ccb *);

static int	vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
		    struct sglist *, struct ccb_scsiio *);
static int	vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
		    struct vtscsi_request *, int *, int *);
static int	vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
		    struct vtscsi_request *);
static int	vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
static void	vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
		    struct vtscsi_request *);
static int	vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
		    struct vtscsi_request *);
static void	vtscsi_timedout_scsi_cmd(void *);
static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
		    struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
static void	vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
		    struct vtscsi_request *);

static void	vtscsi_poll_ctrl_req(struct vtscsi_softc *,
		    struct vtscsi_request *);
static int	vtscsi_execute_ctrl_req(struct vtscsi_softc *,
		    struct vtscsi_request *, struct sglist *, int, int, int);
static void	vtscsi_complete_abort_task_cmd(struct vtscsi_softc *,
		    struct vtscsi_request *);
static int	vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
		    struct vtscsi_request *);
static int	vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
		    struct vtscsi_request *);

static void	vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
static void	vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
static void	vtscsi_init_scsi_cmd_req(struct vtscsi_softc *,
		    struct ccb_scsiio *, struct virtio_scsi_cmd_req *);
static void	vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *, struct ccb_hdr *,
		    uint32_t, uintptr_t, struct virtio_scsi_ctrl_tmf_req *);

static void	vtscsi_freeze_simq(struct vtscsi_softc *, int);
static int	vtscsi_thaw_simq(struct vtscsi_softc *, int);

static void	vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
		    lun_id_t);
static void	vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
		    lun_id_t);
static void	vtscsi_execute_rescan_bus(struct vtscsi_softc *);

static void	vtscsi_handle_event(struct vtscsi_softc *,
		    struct virtio_scsi_event *);
static int	vtscsi_enqueue_event_buf(struct vtscsi_softc *,
		    struct virtio_scsi_event *);
static int	vtscsi_init_event_vq(struct vtscsi_softc *);
static void	vtscsi_reinit_event_vq(struct vtscsi_softc *);
static void	vtscsi_drain_event_vq(struct vtscsi_softc *);

static void	vtscsi_complete_vqs_locked(struct vtscsi_softc *);
static void	vtscsi_complete_vqs(struct vtscsi_softc *);
static void	vtscsi_drain_vqs(struct vtscsi_softc *);
static void	vtscsi_cancel_request(struct vtscsi_softc *,
		    struct vtscsi_request *);
static void	vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
static void	vtscsi_stop(struct vtscsi_softc *);
static int	vtscsi_reset_bus(struct vtscsi_softc *);

static void	vtscsi_init_request(struct vtscsi_softc *,
		    struct vtscsi_request *);
static int	vtscsi_alloc_requests(struct vtscsi_softc *);
static void	vtscsi_free_requests(struct vtscsi_softc *);
static void	vtscsi_enqueue_request(struct vtscsi_softc *,
		    struct vtscsi_request *);
static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);

static void	vtscsi_complete_request(struct vtscsi_request *);
static void	vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);

static void	vtscsi_control_vq_intr(void *);
static void	vtscsi_event_vq_intr(void *);
static void	vtscsi_request_vq_intr(void *);
static void	vtscsi_disable_vqs_intr(struct vtscsi_softc *);
static void	vtscsi_enable_vqs_intr(struct vtscsi_softc *);

static void	vtscsi_get_tunables(struct vtscsi_softc *);
static void	vtscsi_setup_sysctl(struct vtscsi_softc *);

static void	vtscsi_printf_req(struct vtscsi_request *, const char *,
		    const char *, ...);

#define vtscsi_modern(_sc) (((_sc)->vtscsi_features & VIRTIO_F_VERSION_1) != 0)
#define vtscsi_htog16(_sc, _val)	virtio_htog16(vtscsi_modern(_sc), _val)
#define vtscsi_htog32(_sc, _val)	virtio_htog32(vtscsi_modern(_sc), _val)
#define vtscsi_htog64(_sc, _val)	virtio_htog64(vtscsi_modern(_sc), _val)
#define vtscsi_gtoh16(_sc, _val)	virtio_gtoh16(vtscsi_modern(_sc), _val)
#define vtscsi_gtoh32(_sc, _val)	virtio_gtoh32(vtscsi_modern(_sc), _val)
#define vtscsi_gtoh64(_sc, _val)	virtio_gtoh64(vtscsi_modern(_sc), _val)

/* Global tunables. */
/*
 * The current QEMU VirtIO SCSI implementation does not cancel in-flight
 * IO during virtio_stop(). So in-flight requests still complete after the
 * device reset. We would have to wait for all the in-flight IO to complete,
 * which defeats the typical purpose of a bus reset. We could simulate the
 * bus reset with either I_T_NEXUS_RESET of all the targets, or with
 * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
 * control virtqueue). But this isn't very useful if things really go off
 * the rails, so default to disabled for now.
 */
static int vtscsi_bus_reset_disable = 1;
TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);
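
/*
 * As a loader tunable this can be overridden from loader.conf(5) before
 * boot, for example:
 *
 *	hw.vtscsi.bus_reset_disable="0"
 *
 * which re-enables the simulated bus reset described above.
 */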

static struct virtio_feature_desc vtscsi_feature_desc[] = {
	{ VIRTIO_SCSI_F_INOUT,		"InOut"		},
	{ VIRTIO_SCSI_F_HOTPLUG,	"Hotplug"	},
	{ VIRTIO_SCSI_F_CHANGE,		"ChangeEvent"	},
	{ VIRTIO_SCSI_F_T10_PI,		"T10PI"		},

	{ 0, NULL }
};

static device_method_t vtscsi_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtscsi_probe),
	DEVMETHOD(device_attach,	vtscsi_attach),
	DEVMETHOD(device_detach,	vtscsi_detach),
	DEVMETHOD(device_suspend,	vtscsi_suspend),
	DEVMETHOD(device_resume,	vtscsi_resume),

	DEVMETHOD_END
};

static driver_t vtscsi_driver = {
	"vtscsi",
	vtscsi_methods,
	sizeof(struct vtscsi_softc)
};

VIRTIO_DRIVER_MODULE(virtio_scsi, vtscsi_driver, vtscsi_modevent, NULL);
MODULE_VERSION(virtio_scsi, 1);
MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);

VIRTIO_SIMPLE_PNPINFO(virtio_scsi, VIRTIO_ID_SCSI, "VirtIO SCSI Adapter");

static int
vtscsi_modevent(module_t mod, int type, void *unused)
{
	int error;

	switch (type) {
	case MOD_LOAD:
	case MOD_QUIESCE:
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		error = 0;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtscsi_probe(device_t dev)
{
	return (VIRTIO_SIMPLE_PROBE(dev, virtio_scsi));
}

static int
vtscsi_attach(device_t dev)
{
	struct vtscsi_softc *sc;
	struct virtio_scsi_config scsicfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtscsi_dev = dev;
	virtio_set_feature_desc(dev, vtscsi_feature_desc);

	VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
	TAILQ_INIT(&sc->vtscsi_req_free);

	vtscsi_get_tunables(sc);
	vtscsi_setup_sysctl(sc);

	error = vtscsi_setup_features(sc);
	if (error) {
		device_printf(dev, "cannot setup features\n");
		goto fail;
	}

	vtscsi_read_config(sc, &scsicfg);

	sc->vtscsi_max_channel = scsicfg.max_channel;
	sc->vtscsi_max_target = scsicfg.max_target;
	sc->vtscsi_max_lun = scsicfg.max_lun;
	sc->vtscsi_event_buf_size = scsicfg.event_info_size;

	vtscsi_write_device_config(sc);

	sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
	sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
	if (sc->vtscsi_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtscsi_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	vtscsi_check_sizes(sc);

	error = vtscsi_init_event_vq(sc);
	if (error) {
		device_printf(dev, "cannot populate the eventvq\n");
		goto fail;
	}

	error = vtscsi_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot allocate requests\n");
		goto fail;
	}

	error = vtscsi_alloc_cam(sc);
	if (error) {
		device_printf(dev, "cannot allocate CAM structures\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_CAM);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		goto fail;
	}

	vtscsi_enable_vqs_intr(sc);

	/*
	 * Register with CAM after interrupts are enabled so we will get
	 * notified of the probe responses.
	 */
	error = vtscsi_register_cam(sc);
	if (error) {
		device_printf(dev, "cannot register with CAM\n");
		goto fail;
	}

fail:
	if (error)
		vtscsi_detach(dev);

	return (error);
}

static int
vtscsi_detach(device_t dev)
{
	struct vtscsi_softc *sc;

	sc = device_get_softc(dev);

	VTSCSI_LOCK(sc);
	sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
	if (device_is_attached(dev))
		vtscsi_stop(sc);
	VTSCSI_UNLOCK(sc);

	vtscsi_complete_vqs(sc);
	vtscsi_drain_vqs(sc);

	vtscsi_free_cam(sc);
	vtscsi_free_requests(sc);

	if (sc->vtscsi_sglist != NULL) {
		sglist_free(sc->vtscsi_sglist);
		sc->vtscsi_sglist = NULL;
	}

	VTSCSI_LOCK_DESTROY(sc);

	return (0);
}

static int
vtscsi_suspend(device_t dev)
{

	return (0);
}

static int
vtscsi_resume(device_t dev)
{

	return (0);
}

static int
vtscsi_negotiate_features(struct vtscsi_softc *sc)
{
	device_t dev;
	uint64_t features;

	dev = sc->vtscsi_dev;
	features = VTSCSI_FEATURES;

	sc->vtscsi_features = virtio_negotiate_features(dev, features);
	return (virtio_finalize_features(dev));
}

static int
vtscsi_setup_features(struct vtscsi_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtscsi_dev;

	error = vtscsi_negotiate_features(sc);
	if (error)
		return (error);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;

	return (0);
}

#define VTSCSI_GET_CONFIG(_dev, _field, _cfg)			\
	virtio_read_device_config(_dev,				\
	    offsetof(struct virtio_scsi_config, _field),	\
	    &(_cfg)->_field, sizeof((_cfg)->_field))		\

static void
vtscsi_read_config(struct vtscsi_softc *sc,
    struct virtio_scsi_config *scsicfg)
{
	device_t dev;

	dev = sc->vtscsi_dev;

	bzero(scsicfg, sizeof(struct virtio_scsi_config));

	VTSCSI_GET_CONFIG(dev, num_queues, scsicfg);
	VTSCSI_GET_CONFIG(dev, seg_max, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_sectors, scsicfg);
	VTSCSI_GET_CONFIG(dev, cmd_per_lun, scsicfg);
	VTSCSI_GET_CONFIG(dev, event_info_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, sense_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, cdb_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_channel, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_target, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_lun, scsicfg);
}

#undef VTSCSI_GET_CONFIG

static int
vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
{
	int nsegs;

	nsegs = VTSCSI_MIN_SEGMENTS;

	if (seg_max > 0) {
		nsegs += MIN(seg_max, maxphys / PAGE_SIZE + 1);
		if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
	} else
		nsegs += 1;

	return (nsegs);
}
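
/*
 * Worked example (illustrative): with 4 KB pages and the default 1 MB
 * maxphys, the data portion of a transfer needs at most
 * 1 MB / 4 KB + 1 = 257 segments, so a larger advertised seg_max is
 * clamped to that.  VTSCSI_MIN_SEGMENTS accounts for the segments
 * reserved for the command request and response headers on top of the
 * data segments.
 */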

static int
vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs;

	dev = sc->vtscsi_dev;
	nvqs = 3;

	VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
	    &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
	    &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
	    vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, nvqs, vq_info));
}

static void
vtscsi_check_sizes(struct vtscsi_softc *sc)
{
	int rqsize;

	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) {
		/*
		 * Ensure the assertions in virtqueue_enqueue() hold,
		 * even if the hypervisor reports a bad seg_max.
		 */
		rqsize = virtqueue_size(sc->vtscsi_request_vq);
		if (sc->vtscsi_max_nsegs > rqsize) {
			device_printf(sc->vtscsi_dev,
			    "clamping seg_max (%d %d)\n", sc->vtscsi_max_nsegs,
			    rqsize);
			sc->vtscsi_max_nsegs = rqsize;
		}
	}
}

static void
vtscsi_write_device_config(struct vtscsi_softc *sc)
{

	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, sense_size),
	    VIRTIO_SCSI_SENSE_SIZE);

	/*
	 * This is the size in the virtio_scsi_cmd_req structure. Note
	 * this value (32) is larger than the maximum CAM CDB size (16).
	 */
	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, cdb_size),
	    VIRTIO_SCSI_CDB_SIZE);
}

static int
vtscsi_reinit(struct vtscsi_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtscsi_dev;

	error = virtio_reinit(dev, sc->vtscsi_features);
	if (error == 0) {
		vtscsi_write_device_config(sc);
		virtio_reinit_complete(dev);
		vtscsi_reinit_event_vq(sc);

		vtscsi_enable_vqs_intr(sc);
	}

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);

	return (error);
}

static int
vtscsi_alloc_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	struct cam_devq *devq;
	int openings;

	dev = sc->vtscsi_dev;
	openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;

	devq = cam_simq_alloc(openings);
	if (devq == NULL) {
		device_printf(dev, "cannot allocate SIM queue\n");
		return (ENOMEM);
	}

	sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
	    "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
	    openings, devq);
	if (sc->vtscsi_sim == NULL) {
		cam_simq_free(devq);
		device_printf(dev, "cannot allocate SIM\n");
		return (ENOMEM);
	}

	return (0);
}
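
/*
 * Note that the SIM is sized to fewer openings than allocated requests
 * so that VTSCSI_RESERVED_REQUESTS requests remain available for
 * internal use, such as the task management (abort and reset) commands
 * issued on the control virtqueue.
 */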

static int
vtscsi_register_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	int registered, error;

	dev = sc->vtscsi_dev;
	registered = 0;

	VTSCSI_LOCK(sc);

	if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) {
		error = ENOMEM;
		device_printf(dev, "cannot register XPT bus\n");
		goto fail;
	}

	registered = 1;

	if (xpt_create_path(&sc->vtscsi_path, NULL,
	    cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		error = ENOMEM;
		device_printf(dev, "cannot create bus path\n");
		goto fail;
	}

	if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
		error = EIO;
		device_printf(dev, "cannot register async callback\n");
		goto fail;
	}

	VTSCSI_UNLOCK(sc);

	return (0);

fail:
	if (sc->vtscsi_path != NULL) {
		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;
	}

	if (registered != 0)
		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));

	VTSCSI_UNLOCK(sc);

	return (error);
}

static void
vtscsi_free_cam(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);

	if (sc->vtscsi_path != NULL) {
		vtscsi_deregister_async(sc);

		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;

		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
	}

	if (sc->vtscsi_sim != NULL) {
		cam_sim_free(sc->vtscsi_sim, 1);
		sc->vtscsi_sim = NULL;
	}

	VTSCSI_UNLOCK(sc);
}

static void
vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct vtscsi_softc *sc;

	sim = cb_arg;
	sc = cam_sim_softc(sim);

	vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);

	/*
	 * TODO Once QEMU supports event reporting, we should
	 * (un)subscribe to events here.
	 */
	switch (code) {
	case AC_FOUND_DEVICE:
		break;
	case AC_LOST_DEVICE:
		break;
	}
}

static int
vtscsi_register_async(struct vtscsi_softc *sc)
{
	struct ccb_setasync csa;

	memset(&csa, 0, sizeof(csa));
	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
	csa.callback = vtscsi_cam_async;
	csa.callback_arg = sc->vtscsi_sim;

	xpt_action((union ccb *) &csa);

	return (csa.ccb_h.status);
}

static void
vtscsi_deregister_async(struct vtscsi_softc *sc)
{
	struct ccb_setasync csa;

	memset(&csa, 0, sizeof(csa));
	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = vtscsi_cam_async;
	csa.callback_arg = sc->vtscsi_sim;

	xpt_action((union ccb *) &csa);
}

static void
vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct vtscsi_softc *sc;
	struct ccb_hdr *ccbh;

	sc = cam_sim_softc(sim);
	ccbh = &ccb->ccb_h;

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
		/*
		 * The VTSCSI_MTX is briefly dropped between setting
		 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
		 * drop any CCBs that come in during that window.
		 */
		ccbh->status = CAM_NO_HBA;
		xpt_done(ccb);
		return;
	}

	switch (ccbh->func_code) {
	case XPT_SCSI_IO:
		vtscsi_cam_scsi_io(sc, sim, ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		ccbh->status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
		vtscsi_cam_get_tran_settings(sc, ccb);
		break;

	case XPT_RESET_BUS:
		vtscsi_cam_reset_bus(sc, ccb);
		break;

	case XPT_RESET_DEV:
		vtscsi_cam_reset_dev(sc, ccb);
		break;

	case XPT_ABORT:
		vtscsi_cam_abort(sc, ccb);
		break;

	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:
		vtscsi_cam_path_inquiry(sc, sim, ccb);
		break;

	default:
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);

		ccbh->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

static void
vtscsi_cam_poll(struct cam_sim *sim)
{
	struct vtscsi_softc *sc;

	sc = cam_sim_softc(sim);

	vtscsi_complete_vqs_locked(sc);
}

static void
vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	int error;

	ccbh = &ccb->ccb_h;
	csio = &ccb->csio;

	if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
		error = EINVAL;
		ccbh->status = CAM_REQ_INVALID;
		goto done;
	}

	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
	    (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
		error = EINVAL;
		ccbh->status = CAM_REQ_INVALID;
		goto done;
	}

	error = vtscsi_start_scsi_cmd(sc, ccb);

done:
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
		xpt_done(ccb);
	}
}

static void
vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct ccb_trans_settings *cts;
	struct ccb_trans_settings_scsi *scsi;

	cts = &ccb->cts;
	scsi = &cts->proto_specific.scsi;

	cts->protocol = PROTO_SCSI;
	cts->protocol_version = SCSI_REV_SPC3;
	cts->transport = XPORT_SAS;
	cts->transport_version = 0;

	scsi->valid = CTS_SCSI_VALID_TQ;
	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

	ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
}

static void
vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
{
	int error;

	error = vtscsi_reset_bus(sc);
	if (error == 0)
		ccb->ccb_h.status = CAM_REQ_CMP;
	else
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
	    error, ccb, ccb->ccb_h.status);

	xpt_done(ccb);
}

static void
vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct vtscsi_request *req;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_reset_dev_cmd(sc, req);
	if (error == 0)
		return;

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}

static void
vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct vtscsi_request *req;
	struct ccb_hdr *ccbh;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_abort_task_cmd(sc, req);
	if (error == 0)
		return;

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
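
/*
 * Both XPT_RESET_DEV and XPT_ABORT above are implemented with VirtIO
 * SCSI task management functions submitted on the control virtqueue
 * (see vtscsi_execute_reset_dev_cmd() and
 * vtscsi_execute_abort_task_cmd() below), so each consumes a request
 * structure from the free pool just like a regular SCSI command.
 */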

static void
vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	device_t dev;
	struct ccb_pathinq *cpi;

	dev = sc->vtscsi_dev;
	cpi = &ccb->cpi;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);

	cpi->version_num = 1;
	cpi->hba_inquiry = PI_TAG_ABLE;
	cpi->target_sprt = 0;
	cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
	if (vtscsi_bus_reset_disable != 0)
		cpi->hba_misc |= PIM_NOBUSRESET;
	cpi->hba_eng_cnt = 0;

	cpi->max_target = sc->vtscsi_max_target;
	cpi->max_lun = sc->vtscsi_max_lun;
	cpi->initiator_id = cpi->max_target + 1;

	strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
	strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);

	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);

	cpi->base_transfer_speed = 300000;

	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC3;
	cpi->transport = XPORT_SAS;
	cpi->transport_version = 0;

	cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;

	cpi->hba_vendor = virtio_get_vendor(dev);
	cpi->hba_device = virtio_get_device(dev);
	cpi->hba_subvendor = virtio_get_subvendor(dev);
	cpi->hba_subdevice = virtio_get_subdevice(dev);

	ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
}
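
/*
 * Worked example (illustrative, assuming VTSCSI_MIN_SEGMENTS is 2):
 * the maxio calculation above discounts the segments reserved for the
 * request and response headers, plus one more because a buffer that is
 * not page aligned can straddle an extra page.  With 4 KB pages and 66
 * total segments, maxio would be (66 - 2 - 1) * 4 KB = 252 KB.
 */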

static int
vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
    struct ccb_scsiio *csio)
{
	struct ccb_hdr *ccbh;
	struct bus_dma_segment *dseg;
	int i, error;

	ccbh = &csio->ccb_h;
	error = 0;

	switch ((ccbh->flags & CAM_DATA_MASK)) {
	case CAM_DATA_VADDR:
		error = sglist_append(sg, csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_PADDR:
		error = sglist_append_phys(sg,
		    (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_SG:
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append(sg,
			    (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	case CAM_DATA_SG_PADDR:
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append_phys(sg,
			    (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	case CAM_DATA_BIO:
		error = sglist_append_bio(sg, (struct bio *) csio->data_ptr);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

static int
vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
    int *readable, int *writable)
{
	struct sglist *sg;
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int error;

	sg = sc->vtscsi_sglist;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	sglist_reset(sg);

	sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		/* At least one segment must be left for the response. */
		if (error || sg->sg_nseg == sg->sg_maxseg)
			goto fail;
	}

	*readable = sg->sg_nseg;

	sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		if (error)
			goto fail;
	}

	*writable = sg->sg_nseg - *readable;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
	    "writable=%d\n", req, ccbh, *readable, *writable);

	return (0);

fail:
	/*
	 * This should never happen unless maxio was incorrectly set.
	 */
	vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);

	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
	    "nseg=%d maxseg=%d\n",
	    error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);

	return (EFBIG);
}
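
/*
 * The resulting descriptor chain layout: the device-readable part is
 * the command header followed by any data-out buffers, and the
 * device-writable part is the response structure followed by any
 * data-in buffers:
 *
 *	readable: [virtio_scsi_cmd_req][data-out ...]
 *	writable: [virtio_scsi_cmd_resp][data-in ...]
 *
 * The readable/writable counts returned by
 * vtscsi_fill_scsi_cmd_sglist() tell virtqueue_enqueue() where that
 * boundary falls.
 */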

static int
vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct sglist *sg;
	struct virtqueue *vq;
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int readable, writable, error;

	sg = sc->vtscsi_sglist;
	vq = sc->vtscsi_request_vq;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	vtscsi_init_scsi_cmd_req(sc, csio, cmd_req);

	error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
	if (error)
		return (error);

	req->vsr_complete = vtscsi_complete_scsi_cmd;
	cmd_resp->response = -1;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);

		ccbh->status = CAM_REQUEUE_REQ;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
		return (error);
	}

	ccbh->status |= CAM_SIM_QUEUED;
	ccbh->ccbh_vtscsi_req = req;

	virtqueue_notify(vq);

	if (ccbh->timeout != CAM_TIME_INFINITY) {
		req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
		callout_reset_sbt(&req->vsr_callout, SBT_1MS * ccbh->timeout,
		    0, vtscsi_timedout_scsi_cmd, req, 0);
	}

	vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
	    req, ccbh);

	return (0);
}

static int
vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct vtscsi_request *req;
	int error;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		return (ENOBUFS);
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_scsi_cmd(sc, req);
	if (error)
		vtscsi_enqueue_request(sc, req);

	return (error);
}

static void
vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	struct vtscsi_request *to_req;
	uint8_t response;

	tmf_resp = &req->vsr_tmf_resp;
	response = tmf_resp->response;
	to_req = req->vsr_timedout_req;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
	    req, to_req, response);

	vtscsi_enqueue_request(sc, req);

	/*
	 * The timedout request could have completed between when the
	 * abort task was sent and when the host processed it.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
		return;

	/* The timedout request was successfully aborted. */
	if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
		return;

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/* The timedout request will be aborted by the reset. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
		return;

	vtscsi_reset_bus(sc);
}

static int
vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *to_req)
{
	struct sglist *sg;
	struct ccb_hdr *to_ccbh;
	struct vtscsi_request *req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	to_ccbh = &to_req->vsr_ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = ENOBUFS;
		goto fail;
	}

	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	vtscsi_init_ctrl_tmf_req(sc, to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) to_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_timedout_req = to_req;
	req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);
	if (error == 0)
		return (0);

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
	    "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);

	return (error);
}

static void
vtscsi_timedout_scsi_cmd(void *xreq)
{
	struct vtscsi_softc *sc;
	struct vtscsi_request *to_req;

	to_req = xreq;
	sc = to_req->vsr_softc;

	vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
	    to_req, to_req->vsr_ccb, to_req->vsr_state);

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/*
	 * Bail if the request is not in use. We likely raced when
	 * stopping the callout handler or it has already been aborted.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
	    (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
		return;

	/*
	 * Complete the request queue in case the timedout request is
	 * actually just pending.
	 */
	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
		return;

	sc->vtscsi_stats.scsi_cmd_timeouts++;
	to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;

	if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
		return;

	vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
	vtscsi_reset_bus(sc);
}
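
/*
 * Timeout handling, summarized: CAM expresses ccb_h.timeout in
 * milliseconds, hence the SBT_1MS * timeout callout above.  When a
 * command times out, the request virtqueue is first completed in case
 * the response was merely pending; if the command is still outstanding
 * it is marked TIMEDOUT and an ABORT_TASK task management function is
 * issued for it.  Only if the abort cannot be sent, or later fails, is
 * a full bus reset attempted as a last resort.
 */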

static cam_status
vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
{
	cam_status status;

	switch (cmd_resp->response) {
	case VIRTIO_SCSI_S_OK:
		status = CAM_REQ_CMP;
		break;
	case VIRTIO_SCSI_S_OVERRUN:
		status = CAM_DATA_RUN_ERR;
		break;
	case VIRTIO_SCSI_S_ABORTED:
		status = CAM_REQ_ABORTED;
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		status = CAM_SEL_TIMEOUT;
		break;
	case VIRTIO_SCSI_S_RESET:
		status = CAM_SCSI_BUS_RESET;
		break;
	case VIRTIO_SCSI_S_BUSY:
		status = CAM_SCSI_BUSY;
		break;
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
	case VIRTIO_SCSI_S_TARGET_FAILURE:
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		status = CAM_SCSI_IT_NEXUS_LOST;
		break;
	default: /* VIRTIO_SCSI_S_FAILURE */
		status = CAM_REQ_CMP_ERR;
		break;
	}

	return (status);
}

static cam_status
vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
    struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
{
	uint32_t resp_sense_length;
	cam_status status;

	csio->scsi_status = cmd_resp->status;
	csio->resid = vtscsi_htog32(sc, cmd_resp->resid);

	if (csio->scsi_status == SCSI_STATUS_OK)
		status = CAM_REQ_CMP;
	else
		status = CAM_SCSI_STATUS_ERROR;

	resp_sense_length = vtscsi_htog32(sc, cmd_resp->sense_len);

	if (resp_sense_length > 0) {
		status |= CAM_AUTOSNS_VALID;

		if (resp_sense_length < csio->sense_len)
			csio->sense_resid = csio->sense_len - resp_sense_length;
		else
			csio->sense_resid = 0;

		memcpy(&csio->sense_data, cmd_resp->sense,
		    csio->sense_len - csio->sense_resid);
	}

	vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
	    "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
	    csio, csio->scsi_status, csio->resid, csio->sense_resid);

	return (status);
}

static void
vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_resp *cmd_resp;
	cam_status status;

	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_resp = &req->vsr_cmd_resp;

	KASSERT(ccbh->ccbh_vtscsi_req == req,
	    ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&req->vsr_callout);

	status = vtscsi_scsi_cmd_cam_status(cmd_resp);
	if (status == CAM_REQ_ABORTED) {
		if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
			status = CAM_CMD_TIMEOUT;
	} else if (status == CAM_REQ_CMP)
		status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccbh->path, 1);
	}

	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		status |= CAM_RELEASE_SIMQ;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
	    req, ccbh, status);

	ccbh->status = status;
	xpt_done(req->vsr_ccb);
	vtscsi_enqueue_request(sc, req);
}

static void
vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	/* XXX We probably shouldn't poll forever. */
	req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
	do
		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
	while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);

	req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
}

static int
vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
    struct sglist *sg, int readable, int writable, int flag)
{
	struct virtqueue *vq;
	int error;

	vq = sc->vtscsi_control_vq;

	MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		/*
		 * Return EAGAIN when the virtqueue does not have enough
		 * descriptors available.
		 */
		if (error == ENOSPC || error == EMSGSIZE)
			error = EAGAIN;

		return (error);
	}

	virtqueue_notify(vq);
	if (flag == VTSCSI_EXECUTE_POLL)
		vtscsi_poll_ctrl_req(sc, req);

	return (0);
}
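
/*
 * Control requests are submitted either VTSCSI_EXECUTE_ASYNC, with a
 * completion callback invoked from the control virtqueue, or
 * VTSCSI_EXECUTE_POLL, in which case the queue is spun on until the
 * request completes.  The task management submissions in this driver
 * all use the async mode.
 */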

static void
vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	union ccb *ccb;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;

	ccb = req->vsr_ccb;
	ccbh = &ccb->ccb_h;
	tmf_resp = &req->vsr_tmf_resp;

	switch (tmf_resp->response) {
	case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
		ccbh->status = CAM_REQ_CMP;
		break;
	case VIRTIO_SCSI_S_FUNCTION_REJECTED:
		ccbh->status = CAM_UA_ABORT;
		break;
	default:
		ccbh->status = CAM_REQ_CMP_ERR;
		break;
	}

	xpt_done(ccb);
	vtscsi_enqueue_request(sc, req);
}

static int
vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_abort *cab;
	struct ccb_hdr *ccbh;
	struct ccb_hdr *abort_ccbh;
	struct vtscsi_request *abort_req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	cab = &req->vsr_ccb->cab;
	ccbh = &cab->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* CCB header and request that's to be aborted. */
	abort_ccbh = &cab->abort_ccb->ccb_h;
	abort_req = abort_ccbh->ccbh_vtscsi_req;

	if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
		error = EINVAL;
		goto fail;
	}

	/* Only attempt to abort requests that could be in-flight. */
	if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
		error = EALREADY;
		goto fail;
	}

	abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
	if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&abort_req->vsr_callout);

	vtscsi_init_ctrl_tmf_req(sc, ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) abort_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_abort_task_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

fail:
	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
	    "abort_req=%p\n", error, req, abort_ccbh, abort_req);

	return (error);
}

static void
vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	union ccb *ccb;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;

	ccb = req->vsr_ccb;
	ccbh = &ccb->ccb_h;
	tmf_resp = &req->vsr_tmf_resp;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
	    req, ccb, tmf_resp->response);

	if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
		ccbh->status = CAM_REQ_CMP;
		vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
		    ccbh->target_lun);
	} else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
	vtscsi_enqueue_request(sc, req);
}

static int
vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_resetdev *crd;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	uint32_t subtype;
	int error;

	sg = sc->vtscsi_sglist;
	crd = &req->vsr_ccb->crd;
	ccbh = &crd->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	if (ccbh->target_lun == CAM_LUN_WILDCARD)
		subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
	else
		subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;

	vtscsi_init_ctrl_tmf_req(sc, ccbh, subtype, 0, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_reset_dev_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
	    error, req, ccbh);

	return (error);
}

static void
vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
{

	*target_id = lun[1];
	*lun_id = (lun[2] << 8) | lun[3];
}

static void
vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
{

	lun[0] = 1;
	lun[1] = ccbh->target_id;
	lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
	lun[3] = ccbh->target_lun & 0xFF;
}
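
/*
 * Worked example of the virtio single-level LUN encoding used above:
 * byte 0 is fixed at 1, byte 1 is the target, and bytes 2-3 carry the
 * LUN with 0x40 set in the top bits (SAM flat addressing).  Target 2,
 * LUN 5 thus encodes as { 1, 2, 0x40, 0x05 }, with the remaining bytes
 * of the 8-byte LUN field left zero.
 */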

static void
vtscsi_init_scsi_cmd_req(struct vtscsi_softc *sc, struct ccb_scsiio *csio,
    struct virtio_scsi_cmd_req *cmd_req)
{
	uint8_t attr;

	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		attr = VIRTIO_SCSI_S_HEAD;
		break;
	case MSG_ORDERED_Q_TAG:
		attr = VIRTIO_SCSI_S_ORDERED;
		break;
	case MSG_ACA_TASK:
		attr = VIRTIO_SCSI_S_ACA;
		break;
	default: /* MSG_SIMPLE_Q_TAG */
		attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}

	vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
	cmd_req->tag = vtscsi_gtoh64(sc, (uintptr_t) csio);
	cmd_req->task_attr = attr;

	memcpy(cmd_req->cdb,
	    csio->ccb_h.flags & CAM_CDB_POINTER ?
	    csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
	    csio->cdb_len);
}

static void
vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *sc, struct ccb_hdr *ccbh,
    uint32_t subtype, uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
{

	vtscsi_set_request_lun(ccbh, tmf_req->lun);

	tmf_req->type = vtscsi_gtoh32(sc, VIRTIO_SCSI_T_TMF);
	tmf_req->subtype = vtscsi_gtoh32(sc, subtype);
	tmf_req->tag = vtscsi_gtoh64(sc, tag);
}

static void
vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
{
	int frozen;

	frozen = sc->vtscsi_frozen;

	if (reason & VTSCSI_REQUEST &&
	    (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
		sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;

	if (reason & VTSCSI_REQUEST_VQ &&
	    (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
		sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;

	/* Freeze the SIMQ if transitioned to frozen. */
	if (frozen == 0 && sc->vtscsi_frozen != 0) {
		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
		xpt_freeze_simq(sc->vtscsi_sim, 1);
	}
}

static int
vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
{
	int thawed;

	if (sc->vtscsi_frozen == 0 || reason == 0)
		return (0);

	if (reason & VTSCSI_REQUEST &&
	    sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;

	if (reason & VTSCSI_REQUEST_VQ &&
	    sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;

	thawed = sc->vtscsi_frozen == 0;
	if (thawed != 0)
		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");

	return (thawed);
}
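
/*
 * Freeze/thaw, summarized: two independent shortages can freeze the
 * SIMQ -- running out of request structures (VTSCSI_FROZEN_NO_REQUESTS)
 * and running out of virtqueue descriptors
 * (VTSCSI_FROZEN_REQUEST_VQ_FULL).  The SIMQ is frozen once on the
 * first shortage and released only after vtscsi_thaw_simq() reports
 * that both conditions have cleared.
 */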
*/ 1724 if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) { 1725 xpt_async(ac_code, sc->vtscsi_path, NULL); 1726 return; 1727 } 1728 1729 if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim), 1730 target_id, lun_id) != CAM_REQ_CMP) { 1731 vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n"); 1732 return; 1733 } 1734 1735 xpt_async(ac_code, path, NULL); 1736 xpt_free_path(path); 1737 } 1738 1739 static void 1740 vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id, 1741 lun_id_t lun_id) 1742 { 1743 union ccb *ccb; 1744 cam_status status; 1745 1746 ccb = xpt_alloc_ccb_nowait(); 1747 if (ccb == NULL) { 1748 vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n"); 1749 return; 1750 } 1751 1752 status = xpt_create_path(&ccb->ccb_h.path, NULL, 1753 cam_sim_path(sc->vtscsi_sim), target_id, lun_id); 1754 if (status != CAM_REQ_CMP) { 1755 xpt_free_ccb(ccb); 1756 return; 1757 } 1758 1759 xpt_rescan(ccb); 1760 } 1761 1762 static void 1763 vtscsi_execute_rescan_bus(struct vtscsi_softc *sc) 1764 { 1765 1766 vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 1767 } 1768 1769 static void 1770 vtscsi_transport_reset_event(struct vtscsi_softc *sc, 1771 struct virtio_scsi_event *event) 1772 { 1773 target_id_t target_id; 1774 lun_id_t lun_id; 1775 1776 vtscsi_get_request_lun(event->lun, &target_id, &lun_id); 1777 1778 switch (event->reason) { 1779 case VIRTIO_SCSI_EVT_RESET_RESCAN: 1780 case VIRTIO_SCSI_EVT_RESET_REMOVED: 1781 vtscsi_execute_rescan(sc, target_id, lun_id); 1782 break; 1783 default: 1784 device_printf(sc->vtscsi_dev, 1785 "unhandled transport event reason: %d\n", event->reason); 1786 break; 1787 } 1788 } 1789 1790 static void 1791 vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event) 1792 { 1793 int error __diagused; 1794 1795 if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) { 1796 switch (event->event) { 1797 case VIRTIO_SCSI_T_TRANSPORT_RESET: 1798 vtscsi_transport_reset_event(sc, event); 1799 break; 1800 default: 1801 device_printf(sc->vtscsi_dev, 1802 "unhandled event: %d\n", event->event); 1803 break; 1804 } 1805 } else 1806 vtscsi_execute_rescan_bus(sc); 1807 1808 /* 1809 * This should always be successful since the buffer 1810 * was just dequeued. 1811 */ 1812 error = vtscsi_enqueue_event_buf(sc, event); 1813 KASSERT(error == 0, 1814 ("cannot requeue event buffer: %d", error)); 1815 } 1816 1817 static int 1818 vtscsi_enqueue_event_buf(struct vtscsi_softc *sc, 1819 struct virtio_scsi_event *event) 1820 { 1821 struct sglist *sg; 1822 struct virtqueue *vq; 1823 int size, error; 1824 1825 sg = sc->vtscsi_sglist; 1826 vq = sc->vtscsi_event_vq; 1827 size = sc->vtscsi_event_buf_size; 1828 1829 bzero(event, size); 1830 1831 sglist_reset(sg); 1832 error = sglist_append(sg, event, size); 1833 if (error) 1834 return (error); 1835 1836 error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg); 1837 if (error) 1838 return (error); 1839 1840 virtqueue_notify(vq); 1841 1842 return (0); 1843 } 1844 1845 static int 1846 vtscsi_init_event_vq(struct vtscsi_softc *sc) 1847 { 1848 struct virtio_scsi_event *event; 1849 int i, size, error; 1850 1851 /* 1852 * The first release of QEMU with VirtIO SCSI support would crash 1853 * when attempting to notify the event virtqueue. This was fixed 1854 * when hotplug support was added. 
1855 */ 1856 if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) 1857 size = sc->vtscsi_event_buf_size; 1858 else 1859 size = 0; 1860 1861 if (size < sizeof(struct virtio_scsi_event)) 1862 return (0); 1863 1864 for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) { 1865 event = &sc->vtscsi_event_bufs[i]; 1866 1867 error = vtscsi_enqueue_event_buf(sc, event); 1868 if (error) 1869 break; 1870 } 1871 1872 /* 1873 * Even just one buffer is enough. Missed events are 1874 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag. 1875 */ 1876 if (i > 0) 1877 error = 0; 1878 1879 return (error); 1880 } 1881 1882 static void 1883 vtscsi_reinit_event_vq(struct vtscsi_softc *sc) 1884 { 1885 struct virtio_scsi_event *event; 1886 int i, error; 1887 1888 if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 || 1889 sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event)) 1890 return; 1891 1892 for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) { 1893 event = &sc->vtscsi_event_bufs[i]; 1894 1895 error = vtscsi_enqueue_event_buf(sc, event); 1896 if (error) 1897 break; 1898 } 1899 1900 KASSERT(i > 0, ("cannot reinit event vq: %d", error)); 1901 } 1902 1903 static void 1904 vtscsi_drain_event_vq(struct vtscsi_softc *sc) 1905 { 1906 struct virtqueue *vq; 1907 int last; 1908 1909 vq = sc->vtscsi_event_vq; 1910 last = 0; 1911 1912 while (virtqueue_drain(vq, &last) != NULL) 1913 ; 1914 1915 KASSERT(virtqueue_empty(vq), ("eventvq not empty")); 1916 } 1917 1918 static void 1919 vtscsi_complete_vqs_locked(struct vtscsi_softc *sc) 1920 { 1921 1922 VTSCSI_LOCK_OWNED(sc); 1923 1924 if (sc->vtscsi_request_vq != NULL) 1925 vtscsi_complete_vq(sc, sc->vtscsi_request_vq); 1926 if (sc->vtscsi_control_vq != NULL) 1927 vtscsi_complete_vq(sc, sc->vtscsi_control_vq); 1928 } 1929 1930 static void 1931 vtscsi_complete_vqs(struct vtscsi_softc *sc) 1932 { 1933 1934 VTSCSI_LOCK(sc); 1935 vtscsi_complete_vqs_locked(sc); 1936 VTSCSI_UNLOCK(sc); 1937 } 1938 1939 static void 1940 vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req) 1941 { 1942 union ccb *ccb; 1943 int detach; 1944 1945 ccb = req->vsr_ccb; 1946 1947 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb); 1948 1949 /* 1950 * The callout must be drained when detaching since the request is 1951 * about to be freed. The VTSCSI_MTX must not be held for this in 1952 * case the callout is pending because there is a deadlock potential. 1953 * Otherwise, the virtqueue is being drained because of a bus reset 1954 * so we only need to attempt to stop the callouts. 
1955 */ 1956 detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0; 1957 if (detach != 0) 1958 VTSCSI_LOCK_NOTOWNED(sc); 1959 else 1960 VTSCSI_LOCK_OWNED(sc); 1961 1962 if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) { 1963 if (detach != 0) 1964 callout_drain(&req->vsr_callout); 1965 else 1966 callout_stop(&req->vsr_callout); 1967 } 1968 1969 if (ccb != NULL) { 1970 if (detach != 0) { 1971 VTSCSI_LOCK(sc); 1972 ccb->ccb_h.status = CAM_NO_HBA; 1973 } else 1974 ccb->ccb_h.status = CAM_REQUEUE_REQ; 1975 xpt_done(ccb); 1976 if (detach != 0) 1977 VTSCSI_UNLOCK(sc); 1978 } 1979 1980 vtscsi_enqueue_request(sc, req); 1981 } 1982 1983 static void 1984 vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq) 1985 { 1986 struct vtscsi_request *req; 1987 int last; 1988 1989 last = 0; 1990 1991 vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq); 1992 1993 while ((req = virtqueue_drain(vq, &last)) != NULL) 1994 vtscsi_cancel_request(sc, req); 1995 1996 KASSERT(virtqueue_empty(vq), ("virtqueue not empty")); 1997 } 1998 1999 static void 2000 vtscsi_drain_vqs(struct vtscsi_softc *sc) 2001 { 2002 2003 if (sc->vtscsi_control_vq != NULL) 2004 vtscsi_drain_vq(sc, sc->vtscsi_control_vq); 2005 if (sc->vtscsi_request_vq != NULL) 2006 vtscsi_drain_vq(sc, sc->vtscsi_request_vq); 2007 if (sc->vtscsi_event_vq != NULL) 2008 vtscsi_drain_event_vq(sc); 2009 } 2010 2011 static void 2012 vtscsi_stop(struct vtscsi_softc *sc) 2013 { 2014 2015 vtscsi_disable_vqs_intr(sc); 2016 virtio_stop(sc->vtscsi_dev); 2017 } 2018 2019 static int 2020 vtscsi_reset_bus(struct vtscsi_softc *sc) 2021 { 2022 int error; 2023 2024 VTSCSI_LOCK_OWNED(sc); 2025 2026 if (vtscsi_bus_reset_disable != 0) { 2027 device_printf(sc->vtscsi_dev, "bus reset disabled\n"); 2028 return (0); 2029 } 2030 2031 sc->vtscsi_flags |= VTSCSI_FLAG_RESET; 2032 2033 /* 2034 * vtscsi_stop() will cause the in-flight requests to be canceled. 2035 * Those requests are then completed here so CAM will retry them 2036 * after the reset is complete. 2037 */ 2038 vtscsi_stop(sc); 2039 vtscsi_complete_vqs_locked(sc); 2040 2041 /* Rid the virtqueues of any remaining requests. */ 2042 vtscsi_drain_vqs(sc); 2043 2044 /* 2045 * Any resource shortage that froze the SIMQ cannot persist across 2046 * a bus reset so ensure it gets thawed here. 
static int
vtscsi_reset_bus(struct vtscsi_softc *sc)
{
	int error;

	VTSCSI_LOCK_OWNED(sc);

	if (vtscsi_bus_reset_disable != 0) {
		device_printf(sc->vtscsi_dev, "bus reset disabled\n");
		return (0);
	}

	sc->vtscsi_flags |= VTSCSI_FLAG_RESET;

	/*
	 * vtscsi_stop() will cause the in-flight requests to be canceled.
	 * Those requests are then completed here so CAM will retry them
	 * after the reset is complete.
	 */
	vtscsi_stop(sc);
	vtscsi_complete_vqs_locked(sc);

	/* Rid the virtqueues of any remaining requests. */
	vtscsi_drain_vqs(sc);

	/*
	 * Any resource shortage that froze the SIMQ cannot persist across
	 * a bus reset, so ensure it gets thawed here.
	 */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		xpt_release_simq(sc->vtscsi_sim, 0);

	error = vtscsi_reinit(sc);
	if (error) {
		device_printf(sc->vtscsi_dev,
		    "reinitialization failed, stopping device...\n");
		vtscsi_stop(sc);
	} else
		vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
		    CAM_LUN_WILDCARD);

	sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;

	return (error);
}

static void
vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

#ifdef INVARIANTS
	int req_nsegs, resp_nsegs;

	req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
	resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));

	KASSERT(req_nsegs == 1, ("request crossed page boundary"));
	KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
#endif

	req->vsr_softc = sc;
	callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
}

static int
vtscsi_alloc_requests(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;
	int i, nreqs;

	/*
	 * Commands destined for either the request or control queues come
	 * from the same SIM queue. Size the pool from the request virtqueue
	 * since it should be used much more frequently. Some additional
	 * requests are allocated for internal (TMF) use.
	 */
	nreqs = virtqueue_size(sc->vtscsi_request_vq);
	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
		nreqs /= VTSCSI_MIN_SEGMENTS;
	nreqs += VTSCSI_RESERVED_REQUESTS;

	for (i = 0; i < nreqs; i++) {
		req = malloc(sizeof(struct vtscsi_request), M_DEVBUF,
		    M_NOWAIT);
		if (req == NULL)
			return (ENOMEM);

		vtscsi_init_request(sc, req);

		sc->vtscsi_nrequests++;
		vtscsi_enqueue_request(sc, req);
	}

	return (0);
}

static void
vtscsi_free_requests(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;

	while ((req = vtscsi_dequeue_request(sc)) != NULL) {
		KASSERT(callout_active(&req->vsr_callout) == 0,
		    ("request callout still active"));

		sc->vtscsi_nrequests--;
		free(req, M_DEVBUF);
	}

	KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
	    sc->vtscsi_nrequests));
}
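/*
 * Worked example (hypothetical values, for illustration only): with a
 * request virtqueue of 128 entries and no indirect descriptor support,
 * vtscsi_alloc_requests() above computes
 *
 *	nreqs = 128 / VTSCSI_MIN_SEGMENTS + VTSCSI_RESERVED_REQUESTS
 *
 * Assuming VTSCSI_MIN_SEGMENTS is 2 and VTSCSI_RESERVED_REQUESTS is 10
 * (see virtio_scsivar.h for the actual values), that yields 74
 * preallocated requests; with indirect descriptors the division is
 * skipped, giving 138.
 */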
static void
vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	KASSERT(req->vsr_softc == sc,
	    ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

	/* A request is available so the SIMQ could be released. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
		xpt_release_simq(sc->vtscsi_sim, 1);

	req->vsr_ccb = NULL;
	req->vsr_complete = NULL;
	req->vsr_ptr0 = NULL;
	req->vsr_state = VTSCSI_REQ_STATE_FREE;
	req->vsr_flags = 0;

	bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
	bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));

	/*
	 * We insert at the tail of the queue in order to make it
	 * very unlikely a request will be reused if we race with
	 * stopping its callout handler.
	 */
	TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
}

static struct vtscsi_request *
vtscsi_dequeue_request(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;

	req = TAILQ_FIRST(&sc->vtscsi_req_free);
	if (req != NULL) {
		req->vsr_state = VTSCSI_REQ_STATE_INUSE;
		TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
	} else
		sc->vtscsi_stats.dequeue_no_requests++;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

	return (req);
}

static void
vtscsi_complete_request(struct vtscsi_request *req)
{

	if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
		req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;

	if (req->vsr_complete != NULL)
		req->vsr_complete(req->vsr_softc, req);
}

static void
vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
{
	struct vtscsi_request *req;

	VTSCSI_LOCK_OWNED(sc);

	while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
		vtscsi_complete_request(req);
}

static void
vtscsi_control_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;

	sc = xsc;
	vq = sc->vtscsi_control_vq;

again:
	VTSCSI_LOCK(sc);

	vtscsi_complete_vq(sc, sc->vtscsi_control_vq);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

	VTSCSI_UNLOCK(sc);
}

static void
vtscsi_event_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;
	struct virtio_scsi_event *event;

	sc = xsc;
	vq = sc->vtscsi_event_vq;

again:
	VTSCSI_LOCK(sc);

	while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
		vtscsi_handle_event(sc, event);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

	VTSCSI_UNLOCK(sc);
}

static void
vtscsi_request_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;

	sc = xsc;
	vq = sc->vtscsi_request_vq;

again:
	VTSCSI_LOCK(sc);

	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

	VTSCSI_UNLOCK(sc);
}

static void
vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_disable_intr(sc->vtscsi_control_vq);
	virtqueue_disable_intr(sc->vtscsi_event_vq);
	virtqueue_disable_intr(sc->vtscsi_request_vq);
}

static void
vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_enable_intr(sc->vtscsi_control_vq);
	virtqueue_enable_intr(sc->vtscsi_event_vq);
	virtqueue_enable_intr(sc->vtscsi_request_vq);
}
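/*
 * Illustrative note on the interrupt handlers above (an inference from
 * how virtqueue_enable_intr() is used here, not a statement of its
 * contract): re-enabling the interrupt reports whether more entries
 * were published while the handler ran, so each handler loops until
 * the queue is quiescent with interrupts enabled:
 *
 *	again:
 *		VTSCSI_LOCK(sc);
 *		... process completions ...
 *		if (virtqueue_enable_intr(vq) != 0) {
 *			// raced with the host: more work is pending
 *			virtqueue_disable_intr(vq);
 *			VTSCSI_UNLOCK(sc);
 *			goto again;
 *		}
 *		VTSCSI_UNLOCK(sc);
 */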
static void
vtscsi_get_tunables(struct vtscsi_softc *sc)
{
	char tmpstr[64];

	TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);

	snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
	    device_get_unit(sc->vtscsi_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
}

static void
vtscsi_setup_sysctl(struct vtscsi_softc *sc)
{
	device_t dev;
	struct vtscsi_statistics *stats;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtscsi_dev;
	stats = &sc->vtscsi_stats;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
	    CTLFLAG_RW, &sc->vtscsi_debug, 0,
	    "Debug level");

	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
	    CTLFLAG_RD, &stats->scsi_cmd_timeouts,
	    "SCSI command timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
	    CTLFLAG_RD, &stats->dequeue_no_requests,
	    "No available requests to dequeue");
}

static void
vtscsi_printf_req(struct vtscsi_request *req, const char *func,
    const char *fmt, ...)
{
	struct vtscsi_softc *sc;
	union ccb *ccb;
	struct sbuf sb;
	va_list ap;
	char str[192];

	if (req == NULL)
		return;

	sc = req->vsr_softc;
	ccb = req->vsr_ccb;

	va_start(ap, fmt);
	sbuf_new(&sb, str, sizeof(str), 0);

	if (ccb == NULL) {
		sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
		    cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
		    cam_sim_bus(sc->vtscsi_sim));
	} else {
		xpt_path_sbuf(ccb->ccb_h.path, &sb);
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
		}
	}

	sbuf_vprintf(&sb, fmt, ap);
	va_end(ap);

	sbuf_finish(&sb);
	printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
	    sbuf_data(&sb));
}
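/*
 * Usage sketch (illustrative shell commands, not part of the driver):
 * the debug level can be set for all vtscsi instances from
 * /boot/loader.conf before boot:
 *
 *	hw.vtscsi.debug_level="1"
 *
 * or per device at runtime, since the per-device OID is CTLFLAG_RW:
 *
 *	# sysctl dev.vtscsi.0.debug_level=1
 *
 * The statistics OIDs (scsi_cmd_timeouts, dequeue_no_requests) are
 * read-only counters under the same dev.vtscsi.<unit> tree.
 */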