/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2016 Jakub Klama <jceel@FreeBSD.org>.
 * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/sbuf.h>

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <camlib.h>

#include "bhyverun.h"
#include "debug.h"
#include "pci_emul.h"
#include "virtio.h"
#include "iov.h"

#define	VTSCSI_RINGSZ		64
#define	VTSCSI_REQUESTQ		1
#define	VTSCSI_THR_PER_Q	16
#define	VTSCSI_MAXQ		(VTSCSI_REQUESTQ + 2)
#define	VTSCSI_MAXSEG		64

#define	VTSCSI_IN_HEADER_LEN(_sc)	\
	(sizeof(struct pci_vtscsi_req_cmd_rd) + _sc->vss_config.cdb_size)

#define	VTSCSI_OUT_HEADER_LEN(_sc)	\
	(sizeof(struct pci_vtscsi_req_cmd_wr) + _sc->vss_config.sense_size)

#define	VIRTIO_SCSI_MAX_CHANNEL	0
#define	VIRTIO_SCSI_MAX_TARGET	0
#define	VIRTIO_SCSI_MAX_LUN	16383

#define	VIRTIO_SCSI_F_INOUT	(1 << 0)
#define	VIRTIO_SCSI_F_HOTPLUG	(1 << 1)
#define	VIRTIO_SCSI_F_CHANGE	(1 << 2)

static int pci_vtscsi_debug = 0;
#define	DPRINTF(params) if (pci_vtscsi_debug) PRINTLN params
#define	WPRINTF(params) PRINTLN params

struct pci_vtscsi_config {
	uint32_t num_queues;
	uint32_t seg_max;
	uint32_t max_sectors;
	uint32_t cmd_per_lun;
	uint32_t event_info_size;
	uint32_t sense_size;
	uint32_t cdb_size;
	uint16_t max_channel;
	uint16_t max_target;
	uint32_t max_lun;
} __attribute__((packed));
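
/*
 * Per-virtqueue state: a lock-protected list of pending requests serviced
 * by a pool of worker threads.
 */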
struct pci_vtscsi_queue {
	struct pci_vtscsi_softc *         vsq_sc;
	struct vqueue_info *              vsq_vq;
	pthread_mutex_t                   vsq_mtx;
	pthread_mutex_t                   vsq_qmtx;
	pthread_cond_t                    vsq_cv;
	STAILQ_HEAD(, pci_vtscsi_request) vsq_requests;
	LIST_HEAD(, pci_vtscsi_worker)    vsq_workers;
};

struct pci_vtscsi_worker {
	struct pci_vtscsi_queue *     vsw_queue;
	pthread_t                     vsw_thread;
	bool                          vsw_exiting;
	LIST_ENTRY(pci_vtscsi_worker) vsw_link;
};

struct pci_vtscsi_request {
	struct pci_vtscsi_queue *        vsr_queue;
	struct iovec                     vsr_iov_in[VTSCSI_MAXSEG];
	int                              vsr_niov_in;
	struct iovec                     vsr_iov_out[VTSCSI_MAXSEG];
	int                              vsr_niov_out;
	uint32_t                         vsr_idx;
	STAILQ_ENTRY(pci_vtscsi_request) vsr_link;
};

/*
 * Per-device softc
 */
struct pci_vtscsi_softc {
	struct virtio_softc      vss_vs;
	struct vqueue_info       vss_vq[VTSCSI_MAXQ];
	struct pci_vtscsi_queue  vss_queues[VTSCSI_REQUESTQ];
	pthread_mutex_t          vss_mtx;
	int                      vss_iid;
	int                      vss_ctl_fd;
	uint32_t                 vss_features;
	struct pci_vtscsi_config vss_config;
};

#define	VIRTIO_SCSI_T_TMF			0
#define	VIRTIO_SCSI_T_TMF_ABORT_TASK		0
#define	VIRTIO_SCSI_T_TMF_ABORT_TASK_SET	1
#define	VIRTIO_SCSI_T_TMF_CLEAR_ACA		2
#define	VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET	3
#define	VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET	4
#define	VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET	5
#define	VIRTIO_SCSI_T_TMF_QUERY_TASK		6
#define	VIRTIO_SCSI_T_TMF_QUERY_TASK_SET	7

/* command-specific response values */
#define	VIRTIO_SCSI_S_FUNCTION_COMPLETE		0
#define	VIRTIO_SCSI_S_FUNCTION_SUCCEEDED	10
#define	VIRTIO_SCSI_S_FUNCTION_REJECTED		11

struct pci_vtscsi_ctrl_tmf {
	uint32_t type;
	uint32_t subtype;
	uint8_t lun[8];
	uint64_t id;
	uint8_t response;
} __attribute__((packed));

#define	VIRTIO_SCSI_T_AN_QUERY			1
#define	VIRTIO_SCSI_EVT_ASYNC_OPERATIONAL_CHANGE 2
#define	VIRTIO_SCSI_EVT_ASYNC_POWER_MGMT	4
#define	VIRTIO_SCSI_EVT_ASYNC_EXTERNAL_REQUEST	8
#define	VIRTIO_SCSI_EVT_ASYNC_MEDIA_CHANGE	16
#define	VIRTIO_SCSI_EVT_ASYNC_MULTI_HOST	32
#define	VIRTIO_SCSI_EVT_ASYNC_DEVICE_BUSY	64

struct pci_vtscsi_ctrl_an {
	uint32_t type;
	uint8_t lun[8];
	uint32_t event_requested;
	uint32_t event_actual;
	uint8_t response;
} __attribute__((packed));

/* command-specific response values */
#define	VIRTIO_SCSI_S_OK			0
#define	VIRTIO_SCSI_S_OVERRUN			1
#define	VIRTIO_SCSI_S_ABORTED			2
#define	VIRTIO_SCSI_S_BAD_TARGET		3
#define	VIRTIO_SCSI_S_RESET			4
#define	VIRTIO_SCSI_S_BUSY			5
#define	VIRTIO_SCSI_S_TRANSPORT_FAILURE		6
#define	VIRTIO_SCSI_S_TARGET_FAILURE		7
#define	VIRTIO_SCSI_S_NEXUS_FAILURE		8
#define	VIRTIO_SCSI_S_FAILURE			9
#define	VIRTIO_SCSI_S_INCORRECT_LUN		12

/* task_attr */
#define	VIRTIO_SCSI_S_SIMPLE			0
#define	VIRTIO_SCSI_S_ORDERED			1
#define	VIRTIO_SCSI_S_HEAD			2
#define	VIRTIO_SCSI_S_ACA			3

struct pci_vtscsi_event {
	uint32_t event;
	uint8_t lun[8];
	uint32_t reason;
} __attribute__((packed));

struct pci_vtscsi_req_cmd_rd {
	uint8_t lun[8];
	uint64_t id;
	uint8_t task_attr;
	uint8_t prio;
	uint8_t crn;
	uint8_t cdb[];
} __attribute__((packed));

struct pci_vtscsi_req_cmd_wr {
	uint32_t sense_len;
	uint32_t residual;
	uint16_t status_qualifier;
	uint8_t status;
	uint8_t response;
	uint8_t sense[];
} __attribute__((packed));
static void *pci_vtscsi_proc(void *);
static void pci_vtscsi_reset(void *);
static void pci_vtscsi_neg_features(void *, uint64_t);
static int pci_vtscsi_cfgread(void *, int, int, uint32_t *);
static int pci_vtscsi_cfgwrite(void *, int, int, uint32_t);
static inline int pci_vtscsi_get_lun(uint8_t *);
static int pci_vtscsi_control_handle(struct pci_vtscsi_softc *, void *, size_t);
static int pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *,
    struct pci_vtscsi_ctrl_tmf *);
static int pci_vtscsi_an_handle(struct pci_vtscsi_softc *,
    struct pci_vtscsi_ctrl_an *);
static int pci_vtscsi_request_handle(struct pci_vtscsi_queue *, struct iovec *,
    int, struct iovec *, int);
static void pci_vtscsi_controlq_notify(void *, struct vqueue_info *);
static void pci_vtscsi_eventq_notify(void *, struct vqueue_info *);
static void pci_vtscsi_requestq_notify(void *, struct vqueue_info *);
static int pci_vtscsi_init_queue(struct pci_vtscsi_softc *,
    struct pci_vtscsi_queue *, int);
static int pci_vtscsi_init(struct vmctx *, struct pci_devinst *, char *);

static struct virtio_consts vtscsi_vi_consts = {
	"vtscsi",				/* our name */
	VTSCSI_MAXQ,				/* we support 2+n virtqueues */
	sizeof(struct pci_vtscsi_config),	/* config reg size */
	pci_vtscsi_reset,			/* reset */
	NULL,					/* device-wide qnotify */
	pci_vtscsi_cfgread,			/* read virtio config */
	pci_vtscsi_cfgwrite,			/* write virtio config */
	pci_vtscsi_neg_features,		/* apply negotiated features */
	0,					/* our capabilities */
};

/*
 * Worker thread body: wait for requests queued by the requestq notify
 * handler, pass each one to CTL and return the completed chain to the guest.
 */
static void *
pci_vtscsi_proc(void *arg)
{
	struct pci_vtscsi_worker *worker = (struct pci_vtscsi_worker *)arg;
	struct pci_vtscsi_queue *q = worker->vsw_queue;
	struct pci_vtscsi_request *req;
	int iolen;

	for (;;) {
		pthread_mutex_lock(&q->vsq_mtx);

		while (STAILQ_EMPTY(&q->vsq_requests)
		    && !worker->vsw_exiting)
			pthread_cond_wait(&q->vsq_cv, &q->vsq_mtx);

		if (worker->vsw_exiting)
			break;

		req = STAILQ_FIRST(&q->vsq_requests);
		STAILQ_REMOVE_HEAD(&q->vsq_requests, vsr_link);

		pthread_mutex_unlock(&q->vsq_mtx);
		iolen = pci_vtscsi_request_handle(q, req->vsr_iov_in,
		    req->vsr_niov_in, req->vsr_iov_out, req->vsr_niov_out);

		pthread_mutex_lock(&q->vsq_qmtx);
		vq_relchain(q->vsq_vq, req->vsr_idx, iolen);
		vq_endchains(q->vsq_vq, 0);
		pthread_mutex_unlock(&q->vsq_qmtx);

		DPRINTF(("virtio-scsi: request <idx=%d> completed",
		    req->vsr_idx));
		free(req);
	}

	pthread_mutex_unlock(&q->vsq_mtx);
	return (NULL);
}

static void
pci_vtscsi_reset(void *vsc)
{
	struct pci_vtscsi_softc *sc;

	sc = vsc;

	DPRINTF(("vtscsi: device reset requested"));
	vi_reset_dev(&sc->vss_vs);

	/* initialize config structure */
	sc->vss_config = (struct pci_vtscsi_config){
		.num_queues = VTSCSI_REQUESTQ,
		/* Leave room for the request and the response. */
		.seg_max = VTSCSI_MAXSEG - 2,
		.max_sectors = 2,
		.cmd_per_lun = 1,
		.event_info_size = sizeof(struct pci_vtscsi_event),
		.sense_size = 96,
		.cdb_size = 32,
		.max_channel = VIRTIO_SCSI_MAX_CHANNEL,
		.max_target = VIRTIO_SCSI_MAX_TARGET,
		.max_lun = VIRTIO_SCSI_MAX_LUN
	};
}

static void
pci_vtscsi_neg_features(void *vsc, uint64_t negotiated_features)
{
	struct pci_vtscsi_softc *sc = vsc;

	sc->vss_features = negotiated_features;
}

static int
pci_vtscsi_cfgread(void *vsc, int offset, int size, uint32_t *retval)
{
	struct pci_vtscsi_softc *sc = vsc;
	void *ptr;

	ptr = (uint8_t *)&sc->vss_config + offset;
	memcpy(retval, ptr, size);
	return (0);
}

static int
pci_vtscsi_cfgwrite(void *vsc, int offset, int size, uint32_t val)
{

	return (0);
}

/* Extract the 14-bit LUN number from the 8-byte virtio-scsi LUN field. */
static inline int
pci_vtscsi_get_lun(uint8_t *lun)
{

	return (((lun[2] << 8) | lun[3]) & 0x3fff);
}

static int
pci_vtscsi_control_handle(struct pci_vtscsi_softc *sc, void *buf,
    size_t bufsize)
{
	struct pci_vtscsi_ctrl_tmf *tmf;
	struct pci_vtscsi_ctrl_an *an;
	uint32_t type;

	type = *(uint32_t *)buf;

	if (type == VIRTIO_SCSI_T_TMF) {
		tmf = (struct pci_vtscsi_ctrl_tmf *)buf;
		return (pci_vtscsi_tmf_handle(sc, tmf));
	}

	if (type == VIRTIO_SCSI_T_AN_QUERY) {
		an = (struct pci_vtscsi_ctrl_an *)buf;
		return (pci_vtscsi_an_handle(sc, an));
	}

	return (0);
}

static int
pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *sc,
    struct pci_vtscsi_ctrl_tmf *tmf)
{
	union ctl_io *io;
	int err;

	io = ctl_scsi_alloc_io(sc->vss_iid);
	ctl_scsi_zero_io(io);

	io->io_hdr.io_type = CTL_IO_TASK;
	io->io_hdr.nexus.initid = sc->vss_iid;
	io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(tmf->lun);
	io->taskio.tag_type = CTL_TAG_SIMPLE;
	io->taskio.tag_num = (uint32_t)tmf->id;

	switch (tmf->subtype) {
	case VIRTIO_SCSI_T_TMF_ABORT_TASK:
		io->taskio.task_action = CTL_TASK_ABORT_TASK;
		break;

	case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
		io->taskio.task_action = CTL_TASK_ABORT_TASK_SET;
		break;

	case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
		io->taskio.task_action = CTL_TASK_CLEAR_ACA;
		break;

	case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
		io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET;
		break;

	case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
		io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET;
		break;

	case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
		io->taskio.task_action = CTL_TASK_LUN_RESET;
		break;

	case VIRTIO_SCSI_T_TMF_QUERY_TASK:
		io->taskio.task_action = CTL_TASK_QUERY_TASK;
		break;

	case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
		io->taskio.task_action = CTL_TASK_QUERY_TASK_SET;
		break;
	}

	if (pci_vtscsi_debug) {
		struct sbuf *sb = sbuf_new_auto();
		ctl_io_sbuf(io, sb);
		sbuf_finish(sb);
		DPRINTF(("pci_virtio_scsi: %s", sbuf_data(sb)));
		sbuf_delete(sb);
	}

	err = ioctl(sc->vss_ctl_fd, CTL_IO, io);
	if (err != 0)
		WPRINTF(("CTL_IO: err=%d (%s)", errno, strerror(errno)));

	tmf->response = io->taskio.task_status;
	ctl_scsi_free_io(io);
	return (1);
}

static int
pci_vtscsi_an_handle(struct pci_vtscsi_softc *sc,
    struct pci_vtscsi_ctrl_an *an)
{

	return (0);
}

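/*
 * Translate a single virtio-scsi command request into a CTL I/O, submit it
 * to the kernel CTL layer via the CTL_IO ioctl and copy the completion
 * status and sense data back into the guest-supplied response header.
 */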
static int
pci_vtscsi_request_handle(struct pci_vtscsi_queue *q, struct iovec *iov_in,
    int niov_in, struct iovec *iov_out, int niov_out)
{
	struct pci_vtscsi_softc *sc = q->vsq_sc;
	struct pci_vtscsi_req_cmd_rd *cmd_rd = NULL;
	struct pci_vtscsi_req_cmd_wr *cmd_wr;
	struct iovec data_iov_in[VTSCSI_MAXSEG], data_iov_out[VTSCSI_MAXSEG];
	union ctl_io *io;
	int data_niov_in, data_niov_out;
	void *ext_data_ptr = NULL;
	uint32_t ext_data_len = 0, ext_sg_entries = 0;
	int err, nxferred;

	seek_iov(iov_in, niov_in, data_iov_in, &data_niov_in,
	    VTSCSI_IN_HEADER_LEN(sc));
	seek_iov(iov_out, niov_out, data_iov_out, &data_niov_out,
	    VTSCSI_OUT_HEADER_LEN(sc));

	truncate_iov(iov_in, &niov_in, VTSCSI_IN_HEADER_LEN(sc));
	truncate_iov(iov_out, &niov_out, VTSCSI_OUT_HEADER_LEN(sc));
	iov_to_buf(iov_in, niov_in, (void **)&cmd_rd);

	cmd_wr = malloc(VTSCSI_OUT_HEADER_LEN(sc));
	io = ctl_scsi_alloc_io(sc->vss_iid);
	ctl_scsi_zero_io(io);

	io->io_hdr.nexus.initid = sc->vss_iid;
	io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(cmd_rd->lun);

	io->io_hdr.io_type = CTL_IO_SCSI;

	if (data_niov_in > 0) {
		ext_data_ptr = (void *)data_iov_in;
		ext_sg_entries = data_niov_in;
		ext_data_len = count_iov(data_iov_in, data_niov_in);
		io->io_hdr.flags |= CTL_FLAG_DATA_OUT;
	} else if (data_niov_out > 0) {
		ext_data_ptr = (void *)data_iov_out;
		ext_sg_entries = data_niov_out;
		ext_data_len = count_iov(data_iov_out, data_niov_out);
		io->io_hdr.flags |= CTL_FLAG_DATA_IN;
	}

	io->scsiio.sense_len = sc->vss_config.sense_size;
	io->scsiio.tag_num = (uint32_t)cmd_rd->id;
	switch (cmd_rd->task_attr) {
	case VIRTIO_SCSI_S_ORDERED:
		io->scsiio.tag_type = CTL_TAG_ORDERED;
		break;
	case VIRTIO_SCSI_S_HEAD:
		io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
		break;
	case VIRTIO_SCSI_S_ACA:
		io->scsiio.tag_type = CTL_TAG_ACA;
		break;
	case VIRTIO_SCSI_S_SIMPLE:
	default:
		io->scsiio.tag_type = CTL_TAG_SIMPLE;
		break;
	}
	io->scsiio.ext_sg_entries = ext_sg_entries;
	io->scsiio.ext_data_ptr = ext_data_ptr;
	io->scsiio.ext_data_len = ext_data_len;
	io->scsiio.ext_data_filled = 0;
	io->scsiio.cdb_len = sc->vss_config.cdb_size;
	memcpy(io->scsiio.cdb, cmd_rd->cdb, sc->vss_config.cdb_size);

	if (pci_vtscsi_debug) {
		struct sbuf *sb = sbuf_new_auto();
		ctl_io_sbuf(io, sb);
		sbuf_finish(sb);
		DPRINTF(("pci_virtio_scsi: %s", sbuf_data(sb)));
		sbuf_delete(sb);
	}

	err = ioctl(sc->vss_ctl_fd, CTL_IO, io);
	if (err != 0) {
		WPRINTF(("CTL_IO: err=%d (%s)", errno, strerror(errno)));
		cmd_wr->response = VIRTIO_SCSI_S_FAILURE;
	} else {
		cmd_wr->sense_len = MIN(io->scsiio.sense_len,
		    sc->vss_config.sense_size);
		cmd_wr->residual = io->scsiio.residual;
		cmd_wr->status = io->scsiio.scsi_status;
		cmd_wr->response = VIRTIO_SCSI_S_OK;
		memcpy(&cmd_wr->sense, &io->scsiio.sense_data,
		    cmd_wr->sense_len);
	}

	buf_to_iov(cmd_wr, VTSCSI_OUT_HEADER_LEN(sc), iov_out, niov_out, 0);
	nxferred = VTSCSI_OUT_HEADER_LEN(sc) + io->scsiio.ext_data_filled;
	free(cmd_rd);
	free(cmd_wr);
	ctl_scsi_free_io(io);
	return (nxferred);
}

static void
pci_vtscsi_controlq_notify(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtscsi_softc *sc;
	struct iovec iov[VTSCSI_MAXSEG];
	uint16_t idx, n;
	void *buf = NULL;
	size_t bufsize;
	int iolen;

	sc = vsc;

	while (vq_has_descs(vq)) {
		n = vq_getchain(vq, &idx, iov, VTSCSI_MAXSEG, NULL);
		bufsize = iov_to_buf(iov, n, &buf);
		iolen = pci_vtscsi_control_handle(sc, buf, bufsize);
		buf_to_iov(buf + bufsize - iolen, iolen, iov, n,
		    bufsize - iolen);

		/*
		 * Release this chain and handle more
		 */
		vq_relchain(vq, idx, iolen);
	}
	vq_endchains(vq, 1);	/* Generate interrupt if appropriate. */
	free(buf);
}

static void
pci_vtscsi_eventq_notify(void *vsc, struct vqueue_info *vq)
{

	vq_kick_disable(vq);
}

static void
pci_vtscsi_requestq_notify(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtscsi_softc *sc;
	struct pci_vtscsi_queue *q;
	struct pci_vtscsi_request *req;
	struct iovec iov[VTSCSI_MAXSEG];
	uint16_t flags[VTSCSI_MAXSEG];
	uint16_t idx, n, i;
	int readable;

	sc = vsc;
	q = &sc->vss_queues[vq->vq_num - 2];

	while (vq_has_descs(vq)) {
		readable = 0;
		n = vq_getchain(vq, &idx, iov, VTSCSI_MAXSEG, flags);

		/* Count readable descriptors */
		for (i = 0; i < n; i++) {
			if (flags[i] & VRING_DESC_F_WRITE)
				break;

			readable++;
		}

		req = calloc(1, sizeof(struct pci_vtscsi_request));
		req->vsr_idx = idx;
		req->vsr_queue = q;
		req->vsr_niov_in = readable;
		req->vsr_niov_out = n - readable;
		memcpy(req->vsr_iov_in, iov,
		    req->vsr_niov_in * sizeof(struct iovec));
		memcpy(req->vsr_iov_out, iov + readable,
		    req->vsr_niov_out * sizeof(struct iovec));

		pthread_mutex_lock(&q->vsq_mtx);
		STAILQ_INSERT_TAIL(&q->vsq_requests, req, vsr_link);
		pthread_cond_signal(&q->vsq_cv);
		pthread_mutex_unlock(&q->vsq_mtx);

		DPRINTF(("virtio-scsi: request <idx=%d> enqueued", idx));
	}
}

static int
pci_vtscsi_init_queue(struct pci_vtscsi_softc *sc,
    struct pci_vtscsi_queue *queue, int num)
{
	struct pci_vtscsi_worker *worker;
	char tname[MAXCOMLEN + 1];
	int i;

	queue->vsq_sc = sc;
	queue->vsq_vq = &sc->vss_vq[num + 2];

	pthread_mutex_init(&queue->vsq_mtx, NULL);
	pthread_mutex_init(&queue->vsq_qmtx, NULL);
	pthread_cond_init(&queue->vsq_cv, NULL);
	STAILQ_INIT(&queue->vsq_requests);
	LIST_INIT(&queue->vsq_workers);

	for (i = 0; i < VTSCSI_THR_PER_Q; i++) {
		worker = calloc(1, sizeof(struct pci_vtscsi_worker));
		worker->vsw_queue = queue;

		pthread_create(&worker->vsw_thread, NULL, &pci_vtscsi_proc,
		    (void *)worker);

		snprintf(tname, sizeof(tname), "vtscsi:%d-%d", num, i);
		pthread_set_name_np(worker->vsw_thread, tname);
		LIST_INSERT_HEAD(&queue->vsq_workers, worker, vsw_link);
	}

	return (0);
}

static int
pci_vtscsi_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	struct pci_vtscsi_softc *sc;
	char *opt, *optname;
	const char *devname;
	int i, optidx = 0;

	sc = calloc(1, sizeof(struct pci_vtscsi_softc));
	devname = "/dev/cam/ctl";
	while ((opt = strsep(&opts, ",")) != NULL) {
		optname = strsep(&opt, "=");
		if (opt == NULL && optidx == 0) {
			if (optname[0] != 0)
				devname = optname;
		} else if (strcmp(optname, "dev") == 0 && opt != NULL) {
			devname = opt;
		} else if (strcmp(optname, "iid") == 0 && opt != NULL) {
			sc->vss_iid = strtoul(opt, NULL, 10);
		} else {
			EPRINTLN("Invalid option %s", optname);
			free(sc);
			return (1);
		}
		optidx++;
	}

	sc->vss_ctl_fd = open(devname, O_RDWR);
	if (sc->vss_ctl_fd < 0) {
		WPRINTF(("cannot open %s: %s", devname, strerror(errno)));
		free(sc);
		return (1);
	}

	vi_softc_linkup(&sc->vss_vs, &vtscsi_vi_consts, sc, pi, sc->vss_vq);
	sc->vss_vs.vs_mtx = &sc->vss_mtx;

	/* controlq */
	sc->vss_vq[0].vq_qsize = VTSCSI_RINGSZ;
	sc->vss_vq[0].vq_notify = pci_vtscsi_controlq_notify;

	/* eventq */
	sc->vss_vq[1].vq_qsize = VTSCSI_RINGSZ;
	sc->vss_vq[1].vq_notify = pci_vtscsi_eventq_notify;

	/* request queues */
	for (i = 2; i < VTSCSI_MAXQ; i++) {
		sc->vss_vq[i].vq_qsize = VTSCSI_RINGSZ;
		sc->vss_vq[i].vq_notify = pci_vtscsi_requestq_notify;
		pci_vtscsi_init_queue(sc, &sc->vss_queues[i - 2], i - 2);
	}

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_SCSI);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_SCSI);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	if (vi_intr_init(&sc->vss_vs, 1, fbsdrun_virtio_msix()))
		return (1);
	vi_set_io_bar(&sc->vss_vs, 0);

	return (0);
}


struct pci_devemu pci_de_vscsi = {
	.pe_emu =	"virtio-scsi",
	.pe_init =	pci_vtscsi_init,
	.pe_barwrite =	vi_pci_write,
	.pe_barread =	vi_pci_read
};
PCI_EMUL_SET(pci_de_vscsi);