1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2016 Jakub Klama <jceel@FreeBSD.org>. 5 * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer 13 * in this position and unchanged. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/param.h> 35 #include <sys/linker_set.h> 36 #include <sys/types.h> 37 #include <sys/uio.h> 38 #include <sys/time.h> 39 #include <sys/queue.h> 40 #include <sys/sbuf.h> 41 42 #include <errno.h> 43 #include <fcntl.h> 44 #include <stdio.h> 45 #include <stdlib.h> 46 #include <stdbool.h> 47 #include <string.h> 48 #include <unistd.h> 49 #include <assert.h> 50 #include <pthread.h> 51 #include <pthread_np.h> 52 53 #include <cam/scsi/scsi_all.h> 54 #include <cam/scsi/scsi_message.h> 55 #include <cam/ctl/ctl.h> 56 #include <cam/ctl/ctl_io.h> 57 #include <cam/ctl/ctl_backend.h> 58 #include <cam/ctl/ctl_ioctl.h> 59 #include <cam/ctl/ctl_util.h> 60 #include <cam/ctl/ctl_scsi_all.h> 61 #include <camlib.h> 62 63 #include "bhyverun.h" 64 #include "config.h" 65 #include "debug.h" 66 #include "pci_emul.h" 67 #include "virtio.h" 68 #include "iov.h" 69 70 #define VTSCSI_RINGSZ 64 71 #define VTSCSI_REQUESTQ 1 72 #define VTSCSI_THR_PER_Q 16 73 #define VTSCSI_MAXQ (VTSCSI_REQUESTQ + 2) 74 #define VTSCSI_MAXSEG 64 75 76 #define VTSCSI_IN_HEADER_LEN(_sc) \ 77 (sizeof(struct pci_vtscsi_req_cmd_rd) + _sc->vss_config.cdb_size) 78 79 #define VTSCSI_OUT_HEADER_LEN(_sc) \ 80 (sizeof(struct pci_vtscsi_req_cmd_wr) + _sc->vss_config.sense_size) 81 82 #define VIRTIO_SCSI_MAX_CHANNEL 0 83 #define VIRTIO_SCSI_MAX_TARGET 0 84 #define VIRTIO_SCSI_MAX_LUN 16383 85 86 #define VIRTIO_SCSI_F_INOUT (1 << 0) 87 #define VIRTIO_SCSI_F_HOTPLUG (1 << 1) 88 #define VIRTIO_SCSI_F_CHANGE (1 << 2) 89 90 static int pci_vtscsi_debug = 0; 91 #define DPRINTF(params) if (pci_vtscsi_debug) PRINTLN params 92 #define WPRINTF(params) PRINTLN params 93 94 struct pci_vtscsi_config { 95 uint32_t num_queues; 96 uint32_t seg_max; 97 uint32_t max_sectors; 98 uint32_t cmd_per_lun; 99 uint32_t event_info_size; 100 uint32_t sense_size; 101 uint32_t cdb_size; 102 uint16_t max_channel; 103 uint16_t max_target; 104 uint32_t max_lun; 105 } __attribute__((packed)); 106 107 struct pci_vtscsi_queue { 108 struct pci_vtscsi_softc * vsq_sc; 109 struct vqueue_info * vsq_vq; 110 pthread_mutex_t vsq_mtx; 111 pthread_mutex_t vsq_qmtx; 112 pthread_cond_t vsq_cv; 113 STAILQ_HEAD(, pci_vtscsi_request) vsq_requests; 114 LIST_HEAD(, pci_vtscsi_worker) vsq_workers; 115 }; 116 117 struct pci_vtscsi_worker { 118 struct pci_vtscsi_queue * vsw_queue; 119 pthread_t vsw_thread; 120 bool vsw_exiting; 121 LIST_ENTRY(pci_vtscsi_worker) vsw_link; 122 }; 123 124 struct pci_vtscsi_request { 125 struct pci_vtscsi_queue * vsr_queue; 126 struct iovec vsr_iov_in[VTSCSI_MAXSEG]; 127 int vsr_niov_in; 128 struct iovec vsr_iov_out[VTSCSI_MAXSEG]; 129 int vsr_niov_out; 130 uint32_t vsr_idx; 131 STAILQ_ENTRY(pci_vtscsi_request) vsr_link; 132 }; 133 134 /* 135 * Per-device softc 136 */ 137 struct pci_vtscsi_softc { 138 struct virtio_softc vss_vs; 139 struct vqueue_info vss_vq[VTSCSI_MAXQ]; 140 struct pci_vtscsi_queue vss_queues[VTSCSI_REQUESTQ]; 141 pthread_mutex_t vss_mtx; 142 int vss_iid; 143 int vss_ctl_fd; 144 uint32_t vss_features; 145 struct pci_vtscsi_config vss_config; 146 }; 147 148 #define VIRTIO_SCSI_T_TMF 0 149 #define VIRTIO_SCSI_T_TMF_ABORT_TASK 0 150 #define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET 1 151 #define VIRTIO_SCSI_T_TMF_CLEAR_ACA 2 152 #define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET 3 153 #define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET 4 154 #define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET 5 155 #define VIRTIO_SCSI_T_TMF_QUERY_TASK 6 156 #define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 7 157 158 /* command-specific response values */ 159 #define VIRTIO_SCSI_S_FUNCTION_COMPLETE 0 160 #define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED 10 161 #define VIRTIO_SCSI_S_FUNCTION_REJECTED 11 162 163 struct pci_vtscsi_ctrl_tmf { 164 uint32_t type; 165 uint32_t subtype; 166 uint8_t lun[8]; 167 uint64_t id; 168 uint8_t response; 169 } __attribute__((packed)); 170 171 #define VIRTIO_SCSI_T_AN_QUERY 1 172 #define VIRTIO_SCSI_EVT_ASYNC_OPERATIONAL_CHANGE 2 173 #define VIRTIO_SCSI_EVT_ASYNC_POWER_MGMT 4 174 #define VIRTIO_SCSI_EVT_ASYNC_EXTERNAL_REQUEST 8 175 #define VIRTIO_SCSI_EVT_ASYNC_MEDIA_CHANGE 16 176 #define VIRTIO_SCSI_EVT_ASYNC_MULTI_HOST 32 177 #define VIRTIO_SCSI_EVT_ASYNC_DEVICE_BUSY 64 178 179 struct pci_vtscsi_ctrl_an { 180 uint32_t type; 181 uint8_t lun[8]; 182 uint32_t event_requested; 183 uint32_t event_actual; 184 uint8_t response; 185 } __attribute__((packed)); 186 187 /* command-specific response values */ 188 #define VIRTIO_SCSI_S_OK 0 189 #define VIRTIO_SCSI_S_OVERRUN 1 190 #define VIRTIO_SCSI_S_ABORTED 2 191 #define VIRTIO_SCSI_S_BAD_TARGET 3 192 #define VIRTIO_SCSI_S_RESET 4 193 #define VIRTIO_SCSI_S_BUSY 5 194 #define VIRTIO_SCSI_S_TRANSPORT_FAILURE 6 195 #define VIRTIO_SCSI_S_TARGET_FAILURE 7 196 #define VIRTIO_SCSI_S_NEXUS_FAILURE 8 197 #define VIRTIO_SCSI_S_FAILURE 9 198 #define VIRTIO_SCSI_S_INCORRECT_LUN 12 199 200 /* task_attr */ 201 #define VIRTIO_SCSI_S_SIMPLE 0 202 #define VIRTIO_SCSI_S_ORDERED 1 203 #define VIRTIO_SCSI_S_HEAD 2 204 #define VIRTIO_SCSI_S_ACA 3 205 206 struct pci_vtscsi_event { 207 uint32_t event; 208 uint8_t lun[8]; 209 uint32_t reason; 210 } __attribute__((packed)); 211 212 struct pci_vtscsi_req_cmd_rd { 213 uint8_t lun[8]; 214 uint64_t id; 215 uint8_t task_attr; 216 uint8_t prio; 217 uint8_t crn; 218 uint8_t cdb[]; 219 } __attribute__((packed)); 220 221 struct pci_vtscsi_req_cmd_wr { 222 uint32_t sense_len; 223 uint32_t residual; 224 uint16_t status_qualifier; 225 uint8_t status; 226 uint8_t response; 227 uint8_t sense[]; 228 } __attribute__((packed)); 229 230 static void *pci_vtscsi_proc(void *); 231 static void pci_vtscsi_reset(void *); 232 static void pci_vtscsi_neg_features(void *, uint64_t); 233 static int pci_vtscsi_cfgread(void *, int, int, uint32_t *); 234 static int pci_vtscsi_cfgwrite(void *, int, int, uint32_t); 235 static inline int pci_vtscsi_get_lun(uint8_t *); 236 static int pci_vtscsi_control_handle(struct pci_vtscsi_softc *, void *, size_t); 237 static int pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *, 238 struct pci_vtscsi_ctrl_tmf *); 239 static int pci_vtscsi_an_handle(struct pci_vtscsi_softc *, 240 struct pci_vtscsi_ctrl_an *); 241 static int pci_vtscsi_request_handle(struct pci_vtscsi_queue *, struct iovec *, 242 int, struct iovec *, int); 243 static void pci_vtscsi_controlq_notify(void *, struct vqueue_info *); 244 static void pci_vtscsi_eventq_notify(void *, struct vqueue_info *); 245 static void pci_vtscsi_requestq_notify(void *, struct vqueue_info *); 246 static int pci_vtscsi_init_queue(struct pci_vtscsi_softc *, 247 struct pci_vtscsi_queue *, int); 248 static int pci_vtscsi_init(struct vmctx *, struct pci_devinst *, nvlist_t *); 249 250 static struct virtio_consts vtscsi_vi_consts = { 251 "vtscsi", /* our name */ 252 VTSCSI_MAXQ, /* we support 2+n virtqueues */ 253 sizeof(struct pci_vtscsi_config), /* config reg size */ 254 pci_vtscsi_reset, /* reset */ 255 NULL, /* device-wide qnotify */ 256 pci_vtscsi_cfgread, /* read virtio config */ 257 pci_vtscsi_cfgwrite, /* write virtio config */ 258 pci_vtscsi_neg_features, /* apply negotiated features */ 259 0, /* our capabilities */ 260 }; 261 262 static void * 263 pci_vtscsi_proc(void *arg) 264 { 265 struct pci_vtscsi_worker *worker = (struct pci_vtscsi_worker *)arg; 266 struct pci_vtscsi_queue *q = worker->vsw_queue; 267 struct pci_vtscsi_request *req; 268 int iolen; 269 270 for (;;) { 271 pthread_mutex_lock(&q->vsq_mtx); 272 273 while (STAILQ_EMPTY(&q->vsq_requests) 274 && !worker->vsw_exiting) 275 pthread_cond_wait(&q->vsq_cv, &q->vsq_mtx); 276 277 if (worker->vsw_exiting) 278 break; 279 280 req = STAILQ_FIRST(&q->vsq_requests); 281 STAILQ_REMOVE_HEAD(&q->vsq_requests, vsr_link); 282 283 pthread_mutex_unlock(&q->vsq_mtx); 284 iolen = pci_vtscsi_request_handle(q, req->vsr_iov_in, 285 req->vsr_niov_in, req->vsr_iov_out, req->vsr_niov_out); 286 287 pthread_mutex_lock(&q->vsq_qmtx); 288 vq_relchain(q->vsq_vq, req->vsr_idx, iolen); 289 vq_endchains(q->vsq_vq, 0); 290 pthread_mutex_unlock(&q->vsq_qmtx); 291 292 DPRINTF(("virtio-scsi: request <idx=%d> completed", 293 req->vsr_idx)); 294 free(req); 295 } 296 297 pthread_mutex_unlock(&q->vsq_mtx); 298 return (NULL); 299 } 300 301 static void 302 pci_vtscsi_reset(void *vsc) 303 { 304 struct pci_vtscsi_softc *sc; 305 306 sc = vsc; 307 308 DPRINTF(("vtscsi: device reset requested")); 309 vi_reset_dev(&sc->vss_vs); 310 311 /* initialize config structure */ 312 sc->vss_config = (struct pci_vtscsi_config){ 313 .num_queues = VTSCSI_REQUESTQ, 314 /* Leave room for the request and the response. */ 315 .seg_max = VTSCSI_MAXSEG - 2, 316 .max_sectors = 2, 317 .cmd_per_lun = 1, 318 .event_info_size = sizeof(struct pci_vtscsi_event), 319 .sense_size = 96, 320 .cdb_size = 32, 321 .max_channel = VIRTIO_SCSI_MAX_CHANNEL, 322 .max_target = VIRTIO_SCSI_MAX_TARGET, 323 .max_lun = VIRTIO_SCSI_MAX_LUN 324 }; 325 } 326 327 static void 328 pci_vtscsi_neg_features(void *vsc, uint64_t negotiated_features) 329 { 330 struct pci_vtscsi_softc *sc = vsc; 331 332 sc->vss_features = negotiated_features; 333 } 334 335 static int 336 pci_vtscsi_cfgread(void *vsc, int offset, int size, uint32_t *retval) 337 { 338 struct pci_vtscsi_softc *sc = vsc; 339 void *ptr; 340 341 ptr = (uint8_t *)&sc->vss_config + offset; 342 memcpy(retval, ptr, size); 343 return (0); 344 } 345 346 static int 347 pci_vtscsi_cfgwrite(void *vsc, int offset, int size, uint32_t val) 348 { 349 350 return (0); 351 } 352 353 static inline int 354 pci_vtscsi_get_lun(uint8_t *lun) 355 { 356 357 return (((lun[2] << 8) | lun[3]) & 0x3fff); 358 } 359 360 static int 361 pci_vtscsi_control_handle(struct pci_vtscsi_softc *sc, void *buf, 362 size_t bufsize) 363 { 364 struct pci_vtscsi_ctrl_tmf *tmf; 365 struct pci_vtscsi_ctrl_an *an; 366 uint32_t type; 367 368 type = *(uint32_t *)buf; 369 370 if (type == VIRTIO_SCSI_T_TMF) { 371 tmf = (struct pci_vtscsi_ctrl_tmf *)buf; 372 return (pci_vtscsi_tmf_handle(sc, tmf)); 373 } 374 375 if (type == VIRTIO_SCSI_T_AN_QUERY) { 376 an = (struct pci_vtscsi_ctrl_an *)buf; 377 return (pci_vtscsi_an_handle(sc, an)); 378 } 379 380 return (0); 381 } 382 383 static int 384 pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *sc, 385 struct pci_vtscsi_ctrl_tmf *tmf) 386 { 387 union ctl_io *io; 388 int err; 389 390 io = ctl_scsi_alloc_io(sc->vss_iid); 391 ctl_scsi_zero_io(io); 392 393 io->io_hdr.io_type = CTL_IO_TASK; 394 io->io_hdr.nexus.initid = sc->vss_iid; 395 io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(tmf->lun); 396 io->taskio.tag_type = CTL_TAG_SIMPLE; 397 io->taskio.tag_num = (uint32_t)tmf->id; 398 399 switch (tmf->subtype) { 400 case VIRTIO_SCSI_T_TMF_ABORT_TASK: 401 io->taskio.task_action = CTL_TASK_ABORT_TASK; 402 break; 403 404 case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET: 405 io->taskio.task_action = CTL_TASK_ABORT_TASK_SET; 406 break; 407 408 case VIRTIO_SCSI_T_TMF_CLEAR_ACA: 409 io->taskio.task_action = CTL_TASK_CLEAR_ACA; 410 break; 411 412 case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET: 413 io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET; 414 break; 415 416 case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET: 417 io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET; 418 break; 419 420 case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET: 421 io->taskio.task_action = CTL_TASK_LUN_RESET; 422 break; 423 424 case VIRTIO_SCSI_T_TMF_QUERY_TASK: 425 io->taskio.task_action = CTL_TASK_QUERY_TASK; 426 break; 427 428 case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET: 429 io->taskio.task_action = CTL_TASK_QUERY_TASK_SET; 430 break; 431 } 432 433 if (pci_vtscsi_debug) { 434 struct sbuf *sb = sbuf_new_auto(); 435 ctl_io_sbuf(io, sb); 436 sbuf_finish(sb); 437 DPRINTF(("pci_virtio_scsi: %s", sbuf_data(sb))); 438 sbuf_delete(sb); 439 } 440 441 err = ioctl(sc->vss_ctl_fd, CTL_IO, io); 442 if (err != 0) 443 WPRINTF(("CTL_IO: err=%d (%s)", errno, strerror(errno))); 444 445 tmf->response = io->taskio.task_status; 446 ctl_scsi_free_io(io); 447 return (1); 448 } 449 450 static int 451 pci_vtscsi_an_handle(struct pci_vtscsi_softc *sc, 452 struct pci_vtscsi_ctrl_an *an) 453 { 454 455 return (0); 456 } 457 458 static int 459 pci_vtscsi_request_handle(struct pci_vtscsi_queue *q, struct iovec *iov_in, 460 int niov_in, struct iovec *iov_out, int niov_out) 461 { 462 struct pci_vtscsi_softc *sc = q->vsq_sc; 463 struct pci_vtscsi_req_cmd_rd *cmd_rd = NULL; 464 struct pci_vtscsi_req_cmd_wr *cmd_wr; 465 struct iovec data_iov_in[VTSCSI_MAXSEG], data_iov_out[VTSCSI_MAXSEG]; 466 union ctl_io *io; 467 int data_niov_in, data_niov_out; 468 void *ext_data_ptr = NULL; 469 uint32_t ext_data_len = 0, ext_sg_entries = 0; 470 int err, nxferred; 471 472 seek_iov(iov_in, niov_in, data_iov_in, &data_niov_in, 473 VTSCSI_IN_HEADER_LEN(sc)); 474 seek_iov(iov_out, niov_out, data_iov_out, &data_niov_out, 475 VTSCSI_OUT_HEADER_LEN(sc)); 476 477 truncate_iov(iov_in, &niov_in, VTSCSI_IN_HEADER_LEN(sc)); 478 truncate_iov(iov_out, &niov_out, VTSCSI_OUT_HEADER_LEN(sc)); 479 iov_to_buf(iov_in, niov_in, (void **)&cmd_rd); 480 481 cmd_wr = malloc(VTSCSI_OUT_HEADER_LEN(sc)); 482 io = ctl_scsi_alloc_io(sc->vss_iid); 483 ctl_scsi_zero_io(io); 484 485 io->io_hdr.nexus.initid = sc->vss_iid; 486 io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(cmd_rd->lun); 487 488 io->io_hdr.io_type = CTL_IO_SCSI; 489 490 if (data_niov_in > 0) { 491 ext_data_ptr = (void *)data_iov_in; 492 ext_sg_entries = data_niov_in; 493 ext_data_len = count_iov(data_iov_in, data_niov_in); 494 io->io_hdr.flags |= CTL_FLAG_DATA_OUT; 495 } else if (data_niov_out > 0) { 496 ext_data_ptr = (void *)data_iov_out; 497 ext_sg_entries = data_niov_out; 498 ext_data_len = count_iov(data_iov_out, data_niov_out); 499 io->io_hdr.flags |= CTL_FLAG_DATA_IN; 500 } 501 502 io->scsiio.sense_len = sc->vss_config.sense_size; 503 io->scsiio.tag_num = (uint32_t)cmd_rd->id; 504 switch (cmd_rd->task_attr) { 505 case VIRTIO_SCSI_S_ORDERED: 506 io->scsiio.tag_type = CTL_TAG_ORDERED; 507 break; 508 case VIRTIO_SCSI_S_HEAD: 509 io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; 510 break; 511 case VIRTIO_SCSI_S_ACA: 512 io->scsiio.tag_type = CTL_TAG_ACA; 513 break; 514 case VIRTIO_SCSI_S_SIMPLE: 515 default: 516 io->scsiio.tag_type = CTL_TAG_SIMPLE; 517 break; 518 } 519 io->scsiio.ext_sg_entries = ext_sg_entries; 520 io->scsiio.ext_data_ptr = ext_data_ptr; 521 io->scsiio.ext_data_len = ext_data_len; 522 io->scsiio.ext_data_filled = 0; 523 io->scsiio.cdb_len = sc->vss_config.cdb_size; 524 memcpy(io->scsiio.cdb, cmd_rd->cdb, sc->vss_config.cdb_size); 525 526 if (pci_vtscsi_debug) { 527 struct sbuf *sb = sbuf_new_auto(); 528 ctl_io_sbuf(io, sb); 529 sbuf_finish(sb); 530 DPRINTF(("pci_virtio_scsi: %s", sbuf_data(sb))); 531 sbuf_delete(sb); 532 } 533 534 err = ioctl(sc->vss_ctl_fd, CTL_IO, io); 535 if (err != 0) { 536 WPRINTF(("CTL_IO: err=%d (%s)", errno, strerror(errno))); 537 cmd_wr->response = VIRTIO_SCSI_S_FAILURE; 538 } else { 539 cmd_wr->sense_len = MIN(io->scsiio.sense_len, 540 sc->vss_config.sense_size); 541 cmd_wr->residual = io->scsiio.residual; 542 cmd_wr->status = io->scsiio.scsi_status; 543 cmd_wr->response = VIRTIO_SCSI_S_OK; 544 memcpy(&cmd_wr->sense, &io->scsiio.sense_data, 545 cmd_wr->sense_len); 546 } 547 548 buf_to_iov(cmd_wr, VTSCSI_OUT_HEADER_LEN(sc), iov_out, niov_out, 0); 549 nxferred = VTSCSI_OUT_HEADER_LEN(sc) + io->scsiio.ext_data_filled; 550 free(cmd_rd); 551 free(cmd_wr); 552 ctl_scsi_free_io(io); 553 return (nxferred); 554 } 555 556 static void 557 pci_vtscsi_controlq_notify(void *vsc, struct vqueue_info *vq) 558 { 559 struct pci_vtscsi_softc *sc; 560 struct iovec iov[VTSCSI_MAXSEG]; 561 struct vi_req req; 562 void *buf = NULL; 563 size_t bufsize; 564 int iolen, n; 565 566 sc = vsc; 567 568 while (vq_has_descs(vq)) { 569 n = vq_getchain(vq, iov, VTSCSI_MAXSEG, &req); 570 assert(n >= 1 && n <= VTSCSI_MAXSEG); 571 572 bufsize = iov_to_buf(iov, n, &buf); 573 iolen = pci_vtscsi_control_handle(sc, buf, bufsize); 574 buf_to_iov(buf + bufsize - iolen, iolen, iov, n, 575 bufsize - iolen); 576 577 /* 578 * Release this chain and handle more 579 */ 580 vq_relchain(vq, req.idx, iolen); 581 } 582 vq_endchains(vq, 1); /* Generate interrupt if appropriate. */ 583 free(buf); 584 } 585 586 static void 587 pci_vtscsi_eventq_notify(void *vsc, struct vqueue_info *vq) 588 { 589 590 vq_kick_disable(vq); 591 } 592 593 static void 594 pci_vtscsi_requestq_notify(void *vsc, struct vqueue_info *vq) 595 { 596 struct pci_vtscsi_softc *sc; 597 struct pci_vtscsi_queue *q; 598 struct pci_vtscsi_request *req; 599 struct iovec iov[VTSCSI_MAXSEG]; 600 struct vi_req vireq; 601 int n; 602 603 sc = vsc; 604 q = &sc->vss_queues[vq->vq_num - 2]; 605 606 while (vq_has_descs(vq)) { 607 n = vq_getchain(vq, iov, VTSCSI_MAXSEG, &vireq); 608 assert(n >= 1 && n <= VTSCSI_MAXSEG); 609 610 req = calloc(1, sizeof(struct pci_vtscsi_request)); 611 req->vsr_idx = vireq.idx; 612 req->vsr_queue = q; 613 req->vsr_niov_in = vireq.readable; 614 req->vsr_niov_out = vireq.writable; 615 memcpy(req->vsr_iov_in, iov, 616 req->vsr_niov_in * sizeof(struct iovec)); 617 memcpy(req->vsr_iov_out, iov + vireq.readable, 618 req->vsr_niov_out * sizeof(struct iovec)); 619 620 pthread_mutex_lock(&q->vsq_mtx); 621 STAILQ_INSERT_TAIL(&q->vsq_requests, req, vsr_link); 622 pthread_cond_signal(&q->vsq_cv); 623 pthread_mutex_unlock(&q->vsq_mtx); 624 625 DPRINTF(("virtio-scsi: request <idx=%d> enqueued", 626 vireq.idx)); 627 } 628 } 629 630 static int 631 pci_vtscsi_init_queue(struct pci_vtscsi_softc *sc, 632 struct pci_vtscsi_queue *queue, int num) 633 { 634 struct pci_vtscsi_worker *worker; 635 char tname[MAXCOMLEN + 1]; 636 int i; 637 638 queue->vsq_sc = sc; 639 queue->vsq_vq = &sc->vss_vq[num + 2]; 640 641 pthread_mutex_init(&queue->vsq_mtx, NULL); 642 pthread_mutex_init(&queue->vsq_qmtx, NULL); 643 pthread_cond_init(&queue->vsq_cv, NULL); 644 STAILQ_INIT(&queue->vsq_requests); 645 LIST_INIT(&queue->vsq_workers); 646 647 for (i = 0; i < VTSCSI_THR_PER_Q; i++) { 648 worker = calloc(1, sizeof(struct pci_vtscsi_worker)); 649 worker->vsw_queue = queue; 650 651 pthread_create(&worker->vsw_thread, NULL, &pci_vtscsi_proc, 652 (void *)worker); 653 654 snprintf(tname, sizeof(tname), "vtscsi:%d-%d", num, i); 655 pthread_set_name_np(worker->vsw_thread, tname); 656 LIST_INSERT_HEAD(&queue->vsq_workers, worker, vsw_link); 657 } 658 659 return (0); 660 } 661 662 static int 663 pci_vtscsi_legacy_config(nvlist_t *nvl, const char *opts) 664 { 665 char *cp, *devname; 666 667 if (opts == NULL) 668 return (0); 669 670 cp = strchr(opts, ','); 671 if (cp == NULL) { 672 set_config_value_node(nvl, "dev", opts); 673 return (0); 674 } 675 devname = strndup(opts, cp - opts); 676 set_config_value_node(nvl, "dev", devname); 677 free(devname); 678 return (pci_parse_legacy_config(nvl, cp + 1)); 679 } 680 681 static int 682 pci_vtscsi_init(struct vmctx *ctx, struct pci_devinst *pi, nvlist_t *nvl) 683 { 684 struct pci_vtscsi_softc *sc; 685 const char *devname, *value; 686 int i; 687 688 sc = calloc(1, sizeof(struct pci_vtscsi_softc)); 689 value = get_config_value_node(nvl, "iid"); 690 if (value != NULL) 691 sc->vss_iid = strtoul(value, NULL, 10); 692 693 devname = get_config_value_node(nvl, "dev"); 694 if (devname == NULL) 695 devname = "/dev/cam/ctl"; 696 sc->vss_ctl_fd = open(devname, O_RDWR); 697 if (sc->vss_ctl_fd < 0) { 698 WPRINTF(("cannot open %s: %s", devname, strerror(errno))); 699 free(sc); 700 return (1); 701 } 702 703 vi_softc_linkup(&sc->vss_vs, &vtscsi_vi_consts, sc, pi, sc->vss_vq); 704 sc->vss_vs.vs_mtx = &sc->vss_mtx; 705 706 /* controlq */ 707 sc->vss_vq[0].vq_qsize = VTSCSI_RINGSZ; 708 sc->vss_vq[0].vq_notify = pci_vtscsi_controlq_notify; 709 710 /* eventq */ 711 sc->vss_vq[1].vq_qsize = VTSCSI_RINGSZ; 712 sc->vss_vq[1].vq_notify = pci_vtscsi_eventq_notify; 713 714 /* request queues */ 715 for (i = 2; i < VTSCSI_MAXQ; i++) { 716 sc->vss_vq[i].vq_qsize = VTSCSI_RINGSZ; 717 sc->vss_vq[i].vq_notify = pci_vtscsi_requestq_notify; 718 pci_vtscsi_init_queue(sc, &sc->vss_queues[i - 2], i - 2); 719 } 720 721 /* initialize config space */ 722 pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_SCSI); 723 pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR); 724 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE); 725 pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_SCSI); 726 pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR); 727 728 if (vi_intr_init(&sc->vss_vs, 1, fbsdrun_virtio_msix())) 729 return (1); 730 vi_set_io_bar(&sc->vss_vs, 0); 731 732 return (0); 733 } 734 735 736 struct pci_devemu pci_de_vscsi = { 737 .pe_emu = "virtio-scsi", 738 .pe_init = pci_vtscsi_init, 739 .pe_legacy_config = pci_vtscsi_legacy_config, 740 .pe_barwrite = vi_pci_write, 741 .pe_barread = vi_pci_read 742 }; 743 PCI_EMUL_SET(pci_de_vscsi); 744