/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016 Jakub Klama <jceel@FreeBSD.org>.
 * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/sbuf.h>

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <camlib.h>

#include "bhyverun.h"
#include "config.h"
#include "debug.h"
#include "pci_emul.h"
#include "virtio.h"
#include "iov.h"

#define VTSCSI_RINGSZ 64
#define VTSCSI_REQUESTQ 1
#define VTSCSI_THR_PER_Q 16
#define VTSCSI_MAXQ (VTSCSI_REQUESTQ + 2)
#define VTSCSI_MAXSEG 64

#define VTSCSI_IN_HEADER_LEN(_sc) \
	(sizeof(struct pci_vtscsi_req_cmd_rd) + _sc->vss_config.cdb_size)

#define VTSCSI_OUT_HEADER_LEN(_sc) \
	(sizeof(struct pci_vtscsi_req_cmd_wr) + _sc->vss_config.sense_size)

#define VIRTIO_SCSI_MAX_CHANNEL 0
#define VIRTIO_SCSI_MAX_TARGET 0
#define VIRTIO_SCSI_MAX_LUN 16383

#define VIRTIO_SCSI_F_INOUT (1 << 0)
#define VIRTIO_SCSI_F_HOTPLUG (1 << 1)
#define VIRTIO_SCSI_F_CHANGE (1 << 2)

static int pci_vtscsi_debug = 0;
#define WPRINTF(msg, params...) PRINTLN("virtio-scsi: " msg, ##params)
#define DPRINTF(msg, params...) if (pci_vtscsi_debug) WPRINTF(msg, ##params)

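/*
 * Device configuration space, exposed to the guest through the virtio
 * config region.  Filled in by pci_vtscsi_reset() and read back in
 * pci_vtscsi_cfgread().
 */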
struct pci_vtscsi_config {
	uint32_t num_queues;
	uint32_t seg_max;
	uint32_t max_sectors;
	uint32_t cmd_per_lun;
	uint32_t event_info_size;
	uint32_t sense_size;
	uint32_t cdb_size;
	uint16_t max_channel;
	uint16_t max_target;
	uint32_t max_lun;
} __attribute__((packed));

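/*
 * Per-request-queue state: the backing virtqueue, the list of pending
 * requests and the pool of worker threads that service them.
 */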
struct pci_vtscsi_queue {
	struct pci_vtscsi_softc *vsq_sc;
	struct vqueue_info *vsq_vq;
	pthread_mutex_t vsq_mtx;
	pthread_mutex_t vsq_qmtx;
	pthread_cond_t vsq_cv;
	STAILQ_HEAD(, pci_vtscsi_request) vsq_requests;
	LIST_HEAD(, pci_vtscsi_worker) vsq_workers;
};

struct pci_vtscsi_worker {
	struct pci_vtscsi_queue *vsw_queue;
	pthread_t vsw_thread;
	bool vsw_exiting;
	LIST_ENTRY(pci_vtscsi_worker) vsw_link;
};

struct pci_vtscsi_request {
	struct pci_vtscsi_queue *vsr_queue;
	struct iovec vsr_iov_in[VTSCSI_MAXSEG];
	int vsr_niov_in;
	struct iovec vsr_iov_out[VTSCSI_MAXSEG];
	int vsr_niov_out;
	uint32_t vsr_idx;
	STAILQ_ENTRY(pci_vtscsi_request) vsr_link;
};

/*
 * Per-device softc
 */
struct pci_vtscsi_softc {
	struct virtio_softc vss_vs;
	struct vqueue_info vss_vq[VTSCSI_MAXQ];
	struct pci_vtscsi_queue vss_queues[VTSCSI_REQUESTQ];
	pthread_mutex_t vss_mtx;
	int vss_iid;
	int vss_ctl_fd;
	uint32_t vss_features;
	struct pci_vtscsi_config vss_config;
};

#define VIRTIO_SCSI_T_TMF 0
#define VIRTIO_SCSI_T_TMF_ABORT_TASK 0
#define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET 1
#define VIRTIO_SCSI_T_TMF_CLEAR_ACA 2
#define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET 3
#define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET 4
#define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET 5
#define VIRTIO_SCSI_T_TMF_QUERY_TASK 6
#define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 7

/* command-specific response values */
#define VIRTIO_SCSI_S_FUNCTION_COMPLETE 0
#define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED 10
#define VIRTIO_SCSI_S_FUNCTION_REJECTED 11

struct pci_vtscsi_ctrl_tmf {
	uint32_t type;
	uint32_t subtype;
	uint8_t lun[8];
	uint64_t id;
	uint8_t response;
} __attribute__((packed));

#define VIRTIO_SCSI_T_AN_QUERY 1
#define VIRTIO_SCSI_EVT_ASYNC_OPERATIONAL_CHANGE 2
#define VIRTIO_SCSI_EVT_ASYNC_POWER_MGMT 4
#define VIRTIO_SCSI_EVT_ASYNC_EXTERNAL_REQUEST 8
#define VIRTIO_SCSI_EVT_ASYNC_MEDIA_CHANGE 16
#define VIRTIO_SCSI_EVT_ASYNC_MULTI_HOST 32
#define VIRTIO_SCSI_EVT_ASYNC_DEVICE_BUSY 64

struct pci_vtscsi_ctrl_an {
	uint32_t type;
	uint8_t lun[8];
	uint32_t event_requested;
	uint32_t event_actual;
	uint8_t response;
} __attribute__((packed));

/* command-specific response values */
#define VIRTIO_SCSI_S_OK 0
#define VIRTIO_SCSI_S_OVERRUN 1
#define VIRTIO_SCSI_S_ABORTED 2
#define VIRTIO_SCSI_S_BAD_TARGET 3
#define VIRTIO_SCSI_S_RESET 4
#define VIRTIO_SCSI_S_BUSY 5
#define VIRTIO_SCSI_S_TRANSPORT_FAILURE 6
#define VIRTIO_SCSI_S_TARGET_FAILURE 7
#define VIRTIO_SCSI_S_NEXUS_FAILURE 8
#define VIRTIO_SCSI_S_FAILURE 9
#define VIRTIO_SCSI_S_INCORRECT_LUN 12

/* task_attr */
#define VIRTIO_SCSI_S_SIMPLE 0
#define VIRTIO_SCSI_S_ORDERED 1
#define VIRTIO_SCSI_S_HEAD 2
#define VIRTIO_SCSI_S_ACA 3

struct pci_vtscsi_event {
	uint32_t event;
	uint8_t lun[8];
	uint32_t reason;
} __attribute__((packed));

struct pci_vtscsi_req_cmd_rd {
	uint8_t lun[8];
	uint64_t id;
	uint8_t task_attr;
	uint8_t prio;
	uint8_t crn;
	uint8_t cdb[];
} __attribute__((packed));

struct pci_vtscsi_req_cmd_wr {
	uint32_t sense_len;
	uint32_t residual;
	uint16_t status_qualifier;
	uint8_t status;
	uint8_t response;
	uint8_t sense[];
} __attribute__((packed));

static void *pci_vtscsi_proc(void *);
static void pci_vtscsi_reset(void *);
static void pci_vtscsi_neg_features(void *, uint64_t);
static int pci_vtscsi_cfgread(void *, int, int, uint32_t *);
static int pci_vtscsi_cfgwrite(void *, int, int, uint32_t);
static inline int pci_vtscsi_get_lun(uint8_t *);
static int pci_vtscsi_control_handle(struct pci_vtscsi_softc *, void *, size_t);
static int pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *,
    struct pci_vtscsi_ctrl_tmf *);
static int pci_vtscsi_an_handle(struct pci_vtscsi_softc *,
    struct pci_vtscsi_ctrl_an *);
static int pci_vtscsi_request_handle(struct pci_vtscsi_queue *, struct iovec *,
    int, struct iovec *, int);
static void pci_vtscsi_controlq_notify(void *, struct vqueue_info *);
static void pci_vtscsi_eventq_notify(void *, struct vqueue_info *);
static void pci_vtscsi_requestq_notify(void *, struct vqueue_info *);
static int pci_vtscsi_init_queue(struct pci_vtscsi_softc *,
    struct pci_vtscsi_queue *, int);
static int pci_vtscsi_init(struct pci_devinst *, nvlist_t *);

static struct virtio_consts vtscsi_vi_consts = {
	.vc_name = "vtscsi",
	.vc_nvq = VTSCSI_MAXQ,
	.vc_cfgsize = sizeof(struct pci_vtscsi_config),
	.vc_reset = pci_vtscsi_reset,
	.vc_cfgread = pci_vtscsi_cfgread,
	.vc_cfgwrite = pci_vtscsi_cfgwrite,
	.vc_apply_features = pci_vtscsi_neg_features,
	.vc_hv_caps = 0,
};

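/*
 * Request queue worker: sleeps until a request is queued by
 * pci_vtscsi_requestq_notify(), hands it to CTL via
 * pci_vtscsi_request_handle() and then returns the used descriptor
 * chain to the guest.
 */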
static void *
pci_vtscsi_proc(void *arg)
{
	struct pci_vtscsi_worker *worker = (struct pci_vtscsi_worker *)arg;
	struct pci_vtscsi_queue *q = worker->vsw_queue;
	struct pci_vtscsi_request *req;
	int iolen;

	for (;;) {
		pthread_mutex_lock(&q->vsq_mtx);

		while (STAILQ_EMPTY(&q->vsq_requests)
		    && !worker->vsw_exiting)
			pthread_cond_wait(&q->vsq_cv, &q->vsq_mtx);

		if (worker->vsw_exiting)
			break;

		req = STAILQ_FIRST(&q->vsq_requests);
		STAILQ_REMOVE_HEAD(&q->vsq_requests, vsr_link);

		pthread_mutex_unlock(&q->vsq_mtx);
		iolen = pci_vtscsi_request_handle(q, req->vsr_iov_in,
		    req->vsr_niov_in, req->vsr_iov_out, req->vsr_niov_out);

		pthread_mutex_lock(&q->vsq_qmtx);
		vq_relchain(q->vsq_vq, req->vsr_idx, iolen);
		vq_endchains(q->vsq_vq, 0);
		pthread_mutex_unlock(&q->vsq_qmtx);

		DPRINTF("request <idx=%d> completed", req->vsr_idx);
		free(req);
	}

	pthread_mutex_unlock(&q->vsq_mtx);
	return (NULL);
}

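/*
 * Device reset: reset the generic virtio state and reinitialize the
 * configuration space to its defaults.
 */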
static void
pci_vtscsi_reset(void *vsc)
{
	struct pci_vtscsi_softc *sc;

	sc = vsc;

	DPRINTF("device reset requested");
	vi_reset_dev(&sc->vss_vs);

	/* initialize config structure */
	sc->vss_config = (struct pci_vtscsi_config){
		.num_queues = VTSCSI_REQUESTQ,
		/* Leave room for the request and the response. */
		.seg_max = VTSCSI_MAXSEG - 2,
		.max_sectors = 2,
		.cmd_per_lun = 1,
		.event_info_size = sizeof(struct pci_vtscsi_event),
		.sense_size = 96,
		.cdb_size = 32,
		.max_channel = VIRTIO_SCSI_MAX_CHANNEL,
		.max_target = VIRTIO_SCSI_MAX_TARGET,
		.max_lun = VIRTIO_SCSI_MAX_LUN
	};
}

static void
pci_vtscsi_neg_features(void *vsc, uint64_t negotiated_features)
{
	struct pci_vtscsi_softc *sc = vsc;

	sc->vss_features = negotiated_features;
}

static int
pci_vtscsi_cfgread(void *vsc, int offset, int size, uint32_t *retval)
{
	struct pci_vtscsi_softc *sc = vsc;
	void *ptr;

	ptr = (uint8_t *)&sc->vss_config + offset;
	memcpy(retval, ptr, size);
	return (0);
}

static int
pci_vtscsi_cfgwrite(void *vsc __unused, int offset __unused, int size __unused,
    uint32_t val __unused)
{
	return (0);
}

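/*
 * Extract the 14-bit LUN number from bytes 2 and 3 of the 8-byte virtio
 * LUN address.
 */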
static inline int
pci_vtscsi_get_lun(uint8_t *lun)
{

	return (((lun[2] << 8) | lun[3]) & 0x3fff);
}

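/*
 * Dispatch a control queue request: task management functions are
 * forwarded to CTL, asynchronous notification queries are answered
 * locally.  Returns the number of response bytes written back for the
 * guest; malformed requests are ignored.
 */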
static int
pci_vtscsi_control_handle(struct pci_vtscsi_softc *sc, void *buf,
    size_t bufsize)
{
	struct pci_vtscsi_ctrl_tmf *tmf;
	struct pci_vtscsi_ctrl_an *an;
	uint32_t type;

	if (bufsize < sizeof(uint32_t)) {
		WPRINTF("ignoring truncated control request");
		return (0);
	}

	type = *(uint32_t *)buf;

	if (type == VIRTIO_SCSI_T_TMF) {
		if (bufsize != sizeof(*tmf)) {
			WPRINTF("ignoring tmf request with size %zu", bufsize);
			return (0);
		}
		tmf = (struct pci_vtscsi_ctrl_tmf *)buf;
		return (pci_vtscsi_tmf_handle(sc, tmf));
	}

	if (type == VIRTIO_SCSI_T_AN_QUERY) {
		if (bufsize != sizeof(*an)) {
			WPRINTF("ignoring AN request with size %zu", bufsize);
			return (0);
		}
		an = (struct pci_vtscsi_ctrl_an *)buf;
		return (pci_vtscsi_an_handle(sc, an));
	}

	return (0);
}

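/*
 * Translate a virtio-scsi task management function into a CTL task I/O
 * and submit it synchronously through the CTL_IO ioctl.  The resulting
 * task status is copied into the guest-visible response field.
 */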
static int
pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *sc,
    struct pci_vtscsi_ctrl_tmf *tmf)
{
	union ctl_io *io;
	int err;

	io = ctl_scsi_alloc_io(sc->vss_iid);
	ctl_scsi_zero_io(io);

	io->io_hdr.io_type = CTL_IO_TASK;
	io->io_hdr.nexus.initid = sc->vss_iid;
	io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(tmf->lun);
	io->taskio.tag_type = CTL_TAG_SIMPLE;
	io->taskio.tag_num = tmf->id;
	io->io_hdr.flags |= CTL_FLAG_USER_TAG;

	switch (tmf->subtype) {
	case VIRTIO_SCSI_T_TMF_ABORT_TASK:
		io->taskio.task_action = CTL_TASK_ABORT_TASK;
		break;

	case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
		io->taskio.task_action = CTL_TASK_ABORT_TASK_SET;
		break;

	case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
		io->taskio.task_action = CTL_TASK_CLEAR_ACA;
		break;

	case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
		io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET;
		break;

	case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
		io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET;
		break;

	case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
		io->taskio.task_action = CTL_TASK_LUN_RESET;
		break;

	case VIRTIO_SCSI_T_TMF_QUERY_TASK:
		io->taskio.task_action = CTL_TASK_QUERY_TASK;
		break;

	case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
		io->taskio.task_action = CTL_TASK_QUERY_TASK_SET;
		break;
	}

	if (pci_vtscsi_debug) {
		struct sbuf *sb = sbuf_new_auto();
		ctl_io_sbuf(io, sb);
		sbuf_finish(sb);
		DPRINTF("%s", sbuf_data(sb));
		sbuf_delete(sb);
	}

	err = ioctl(sc->vss_ctl_fd, CTL_IO, io);
	if (err != 0)
		WPRINTF("CTL_IO: err=%d (%s)", errno, strerror(errno));

	tmf->response = io->taskio.task_status;
	ctl_scsi_free_io(io);
	return (1);
}

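/*
 * Asynchronous notification queries are not supported; report nothing.
 */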
static int
pci_vtscsi_an_handle(struct pci_vtscsi_softc *sc __unused,
    struct pci_vtscsi_ctrl_an *an __unused)
{
	return (0);
}

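/*
 * Handle a single SCSI command: split the descriptor chain into the
 * request/response headers and the data segments, build a CTL SCSI I/O,
 * submit it through the CTL_IO ioctl, and write the completion (status,
 * sense and residual) back into the guest's buffers.  Returns the total
 * number of bytes written to the device-writable part of the chain.
 */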
static int
pci_vtscsi_request_handle(struct pci_vtscsi_queue *q, struct iovec *iov_in,
    int niov_in, struct iovec *iov_out, int niov_out)
{
	struct pci_vtscsi_softc *sc = q->vsq_sc;
	struct pci_vtscsi_req_cmd_rd *cmd_rd = NULL;
	struct pci_vtscsi_req_cmd_wr *cmd_wr;
	struct iovec data_iov_in[VTSCSI_MAXSEG], data_iov_out[VTSCSI_MAXSEG];
	union ctl_io *io;
	int data_niov_in, data_niov_out;
	void *ext_data_ptr = NULL;
	uint32_t ext_data_len = 0, ext_sg_entries = 0;
	int err, nxferred;

	if (count_iov(iov_out, niov_out) < VTSCSI_OUT_HEADER_LEN(sc)) {
		WPRINTF("ignoring request with insufficient output");
		return (0);
	}
	if (count_iov(iov_in, niov_in) < VTSCSI_IN_HEADER_LEN(sc)) {
		WPRINTF("ignoring request with incomplete header");
		return (0);
	}

	seek_iov(iov_in, niov_in, data_iov_in, &data_niov_in,
	    VTSCSI_IN_HEADER_LEN(sc));
	seek_iov(iov_out, niov_out, data_iov_out, &data_niov_out,
	    VTSCSI_OUT_HEADER_LEN(sc));

	truncate_iov(iov_in, &niov_in, VTSCSI_IN_HEADER_LEN(sc));
	truncate_iov(iov_out, &niov_out, VTSCSI_OUT_HEADER_LEN(sc));
	iov_to_buf(iov_in, niov_in, (void **)&cmd_rd);

	cmd_wr = calloc(1, VTSCSI_OUT_HEADER_LEN(sc));
	io = ctl_scsi_alloc_io(sc->vss_iid);
	ctl_scsi_zero_io(io);

	io->io_hdr.nexus.initid = sc->vss_iid;
	io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(cmd_rd->lun);

	io->io_hdr.io_type = CTL_IO_SCSI;

	if (data_niov_in > 0) {
		ext_data_ptr = (void *)data_iov_in;
		ext_sg_entries = data_niov_in;
		ext_data_len = count_iov(data_iov_in, data_niov_in);
		io->io_hdr.flags |= CTL_FLAG_DATA_OUT;
	} else if (data_niov_out > 0) {
		ext_data_ptr = (void *)data_iov_out;
		ext_sg_entries = data_niov_out;
		ext_data_len = count_iov(data_iov_out, data_niov_out);
		io->io_hdr.flags |= CTL_FLAG_DATA_IN;
	}

	io->scsiio.sense_len = sc->vss_config.sense_size;
	io->scsiio.tag_num = cmd_rd->id;
	io->io_hdr.flags |= CTL_FLAG_USER_TAG;
	switch (cmd_rd->task_attr) {
	case VIRTIO_SCSI_S_ORDERED:
		io->scsiio.tag_type = CTL_TAG_ORDERED;
		break;
	case VIRTIO_SCSI_S_HEAD:
		io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
		break;
	case VIRTIO_SCSI_S_ACA:
		io->scsiio.tag_type = CTL_TAG_ACA;
		break;
	case VIRTIO_SCSI_S_SIMPLE:
	default:
		io->scsiio.tag_type = CTL_TAG_SIMPLE;
		break;
	}
	io->scsiio.ext_sg_entries = ext_sg_entries;
	io->scsiio.ext_data_ptr = ext_data_ptr;
	io->scsiio.ext_data_len = ext_data_len;
	io->scsiio.ext_data_filled = 0;
	io->scsiio.cdb_len = sc->vss_config.cdb_size;
	memcpy(io->scsiio.cdb, cmd_rd->cdb, sc->vss_config.cdb_size);

	if (pci_vtscsi_debug) {
		struct sbuf *sb = sbuf_new_auto();
		ctl_io_sbuf(io, sb);
		sbuf_finish(sb);
		DPRINTF("%s", sbuf_data(sb));
		sbuf_delete(sb);
	}

	err = ioctl(sc->vss_ctl_fd, CTL_IO, io);
	if (err != 0) {
		WPRINTF("CTL_IO: err=%d (%s)", errno, strerror(errno));
		cmd_wr->response = VIRTIO_SCSI_S_FAILURE;
	} else {
		cmd_wr->sense_len = MIN(io->scsiio.sense_len,
		    sc->vss_config.sense_size);
		cmd_wr->residual = ext_data_len - io->scsiio.ext_data_filled;
		cmd_wr->status = io->scsiio.scsi_status;
		cmd_wr->response = VIRTIO_SCSI_S_OK;
		memcpy(&cmd_wr->sense, &io->scsiio.sense_data,
		    cmd_wr->sense_len);
	}

	buf_to_iov(cmd_wr, VTSCSI_OUT_HEADER_LEN(sc), iov_out, niov_out, 0);
	nxferred = VTSCSI_OUT_HEADER_LEN(sc) + io->scsiio.ext_data_filled;
	free(cmd_rd);
	free(cmd_wr);
	ctl_scsi_free_io(io);
	return (nxferred);
}

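/*
 * Control queue kick: drain the queue, handling each request inline and
 * returning the used chains to the guest.
 */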
static void
pci_vtscsi_controlq_notify(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtscsi_softc *sc;
	struct iovec iov[VTSCSI_MAXSEG];
	struct vi_req req;
	void *buf = NULL;
	size_t bufsize;
	int iolen, n;

	sc = vsc;

	while (vq_has_descs(vq)) {
		n = vq_getchain(vq, iov, VTSCSI_MAXSEG, &req);
		assert(n >= 1 && n <= VTSCSI_MAXSEG);

		bufsize = iov_to_buf(iov, n, &buf);
		iolen = pci_vtscsi_control_handle(sc, buf, bufsize);
		buf_to_iov((uint8_t *)buf + bufsize - iolen, iolen, iov, n,
		    bufsize - iolen);

		/*
		 * Release this chain and handle more
		 */
		vq_relchain(vq, req.idx, iolen);
	}
	vq_endchains(vq, 1);	/* Generate interrupt if appropriate. */
	free(buf);
}

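/*
 * No events are ever reported, so just stop the guest from kicking the
 * event queue.
 */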
static void
pci_vtscsi_eventq_notify(void *vsc __unused, struct vqueue_info *vq)
{
	vq_kick_disable(vq);
}

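/*
 * Request queue kick: pull each descriptor chain off the virtqueue,
 * package it as a pci_vtscsi_request and hand it to the queue's worker
 * threads.  Completion happens asynchronously in pci_vtscsi_proc().
 */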
static void
pci_vtscsi_requestq_notify(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtscsi_softc *sc;
	struct pci_vtscsi_queue *q;
	struct pci_vtscsi_request *req;
	struct iovec iov[VTSCSI_MAXSEG];
	struct vi_req vireq;
	int n;

	sc = vsc;
	q = &sc->vss_queues[vq->vq_num - 2];

	while (vq_has_descs(vq)) {
		n = vq_getchain(vq, iov, VTSCSI_MAXSEG, &vireq);
		assert(n >= 1 && n <= VTSCSI_MAXSEG);

		req = calloc(1, sizeof(struct pci_vtscsi_request));
		req->vsr_idx = vireq.idx;
		req->vsr_queue = q;
		req->vsr_niov_in = vireq.readable;
		req->vsr_niov_out = vireq.writable;
		memcpy(req->vsr_iov_in, iov,
		    req->vsr_niov_in * sizeof(struct iovec));
		memcpy(req->vsr_iov_out, iov + vireq.readable,
		    req->vsr_niov_out * sizeof(struct iovec));

		pthread_mutex_lock(&q->vsq_mtx);
		STAILQ_INSERT_TAIL(&q->vsq_requests, req, vsr_link);
		pthread_cond_signal(&q->vsq_cv);
		pthread_mutex_unlock(&q->vsq_mtx);

		DPRINTF("request <idx=%d> enqueued", vireq.idx);
	}
}

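/*
 * Set up one request queue and spawn its pool of worker threads.
 */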
static int
pci_vtscsi_init_queue(struct pci_vtscsi_softc *sc,
    struct pci_vtscsi_queue *queue, int num)
{
	struct pci_vtscsi_worker *worker;
	char tname[MAXCOMLEN + 1];
	int i;

	queue->vsq_sc = sc;
	queue->vsq_vq = &sc->vss_vq[num + 2];

	pthread_mutex_init(&queue->vsq_mtx, NULL);
	pthread_mutex_init(&queue->vsq_qmtx, NULL);
	pthread_cond_init(&queue->vsq_cv, NULL);
	STAILQ_INIT(&queue->vsq_requests);
	LIST_INIT(&queue->vsq_workers);

	for (i = 0; i < VTSCSI_THR_PER_Q; i++) {
		worker = calloc(1, sizeof(struct pci_vtscsi_worker));
		worker->vsw_queue = queue;

		pthread_create(&worker->vsw_thread, NULL, &pci_vtscsi_proc,
		    (void *)worker);

		snprintf(tname, sizeof(tname), "vtscsi:%d-%d", num, i);
		pthread_set_name_np(worker->vsw_thread, tname);
		LIST_INSERT_HEAD(&queue->vsq_workers, worker, vsw_link);
	}

	return (0);
}

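/*
 * Convert the legacy option string into nvlist form: the first
 * comma-separated field becomes the "dev" value, the remainder is parsed
 * as regular key=value options.
 */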
static int
pci_vtscsi_legacy_config(nvlist_t *nvl, const char *opts)
{
	char *cp, *devname;

	if (opts == NULL)
		return (0);

	cp = strchr(opts, ',');
	if (cp == NULL) {
		set_config_value_node(nvl, "dev", opts);
		return (0);
	}
	devname = strndup(opts, cp - opts);
	set_config_value_node(nvl, "dev", devname);
	free(devname);
	return (pci_parse_legacy_config(nvl, cp + 1));
}

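/*
 * Device initialization: read the "iid", "bootindex" and "dev" options,
 * open the CTL device (/dev/cam/ctl by default), wire up the control,
 * event and request virtqueues and fill in the PCI config space.
 */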
static int
pci_vtscsi_init(struct pci_devinst *pi, nvlist_t *nvl)
{
	struct pci_vtscsi_softc *sc;
	const char *devname, *value;
	int i;

	sc = calloc(1, sizeof(struct pci_vtscsi_softc));
	value = get_config_value_node(nvl, "iid");
	if (value != NULL)
		sc->vss_iid = strtoul(value, NULL, 10);

	value = get_config_value_node(nvl, "bootindex");
	if (value != NULL) {
		if (pci_emul_add_boot_device(pi, atoi(value))) {
			EPRINTLN("Invalid bootindex %d", atoi(value));
			free(sc);
			return (-1);
		}
	}

	devname = get_config_value_node(nvl, "dev");
	if (devname == NULL)
		devname = "/dev/cam/ctl";
	sc->vss_ctl_fd = open(devname, O_RDWR);
	if (sc->vss_ctl_fd < 0) {
		WPRINTF("cannot open %s: %s", devname, strerror(errno));
		free(sc);
		return (1);
	}

	pthread_mutex_init(&sc->vss_mtx, NULL);

	vi_softc_linkup(&sc->vss_vs, &vtscsi_vi_consts, sc, pi, sc->vss_vq);
	sc->vss_vs.vs_mtx = &sc->vss_mtx;

	/* controlq */
	sc->vss_vq[0].vq_qsize = VTSCSI_RINGSZ;
	sc->vss_vq[0].vq_notify = pci_vtscsi_controlq_notify;

	/* eventq */
	sc->vss_vq[1].vq_qsize = VTSCSI_RINGSZ;
	sc->vss_vq[1].vq_notify = pci_vtscsi_eventq_notify;

	/* request queues */
	for (i = 2; i < VTSCSI_MAXQ; i++) {
		sc->vss_vq[i].vq_qsize = VTSCSI_RINGSZ;
		sc->vss_vq[i].vq_notify = pci_vtscsi_requestq_notify;
		pci_vtscsi_init_queue(sc, &sc->vss_queues[i - 2], i - 2);
	}

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_SCSI);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_SCSI);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	if (vi_intr_init(&sc->vss_vs, 1, fbsdrun_virtio_msix()))
		return (1);
	vi_set_io_bar(&sc->vss_vs, 0);

	return (0);
}

static const struct pci_devemu pci_de_vscsi = {
	.pe_emu = "virtio-scsi",
	.pe_init = pci_vtscsi_init,
	.pe_legacy_config = pci_vtscsi_legacy_config,
	.pe_barwrite = vi_pci_write,
	.pe_barread = vi_pci_read
};
PCI_EMUL_SET(pci_de_vscsi);