/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016 Jakub Klama <jceel@FreeBSD.org>.
 * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/sbuf.h>

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <camlib.h>

#include "bhyverun.h"
#include "config.h"
#include "debug.h"
#include "pci_emul.h"
#include "virtio.h"
#include "iov.h"

#define VTSCSI_RINGSZ		64
#define VTSCSI_REQUESTQ		1
#define VTSCSI_THR_PER_Q	16
#define VTSCSI_MAXQ		(VTSCSI_REQUESTQ + 2)
#define VTSCSI_MAXSEG		64

#define VTSCSI_IN_HEADER_LEN(_sc)	\
	(sizeof(struct pci_vtscsi_req_cmd_rd) + _sc->vss_config.cdb_size)

#define VTSCSI_OUT_HEADER_LEN(_sc)	\
	(sizeof(struct pci_vtscsi_req_cmd_wr) + _sc->vss_config.sense_size)

#define VIRTIO_SCSI_MAX_CHANNEL	0
#define VIRTIO_SCSI_MAX_TARGET	0
#define VIRTIO_SCSI_MAX_LUN	16383

#define VIRTIO_SCSI_F_INOUT	(1 << 0)
#define VIRTIO_SCSI_F_HOTPLUG	(1 << 1)
#define VIRTIO_SCSI_F_CHANGE	(1 << 2)

static int pci_vtscsi_debug = 0;
#define WPRINTF(msg, params...) PRINTLN("virtio-scsi: " msg, ##params)
#define DPRINTF(msg, params...) if (pci_vtscsi_debug) WPRINTF(msg, ##params)

struct pci_vtscsi_config {
	uint32_t num_queues;
	uint32_t seg_max;
	uint32_t max_sectors;
	uint32_t cmd_per_lun;
	uint32_t event_info_size;
	uint32_t sense_size;
	uint32_t cdb_size;
	uint16_t max_channel;
	uint16_t max_target;
	uint32_t max_lun;
} __attribute__((packed));

struct pci_vtscsi_queue {
	struct pci_vtscsi_softc *vsq_sc;
	struct vqueue_info *vsq_vq;
	pthread_mutex_t vsq_mtx;
	pthread_mutex_t vsq_qmtx;
	pthread_cond_t vsq_cv;
	STAILQ_HEAD(, pci_vtscsi_request) vsq_requests;
	LIST_HEAD(, pci_vtscsi_worker) vsq_workers;
};

struct pci_vtscsi_worker {
	struct pci_vtscsi_queue *vsw_queue;
	pthread_t vsw_thread;
	bool vsw_exiting;
	LIST_ENTRY(pci_vtscsi_worker) vsw_link;
};

struct pci_vtscsi_request {
	struct pci_vtscsi_queue *vsr_queue;
	struct iovec vsr_iov_in[VTSCSI_MAXSEG];
	int vsr_niov_in;
	struct iovec vsr_iov_out[VTSCSI_MAXSEG];
	int vsr_niov_out;
	uint32_t vsr_idx;
	STAILQ_ENTRY(pci_vtscsi_request) vsr_link;
};

/*
 * Per-device softc
 */
struct pci_vtscsi_softc {
	struct virtio_softc vss_vs;
	struct vqueue_info vss_vq[VTSCSI_MAXQ];
	struct pci_vtscsi_queue vss_queues[VTSCSI_REQUESTQ];
	pthread_mutex_t vss_mtx;
	int vss_iid;
	int vss_ctl_fd;
	uint32_t vss_features;
	struct pci_vtscsi_config vss_config;
};

#define VIRTIO_SCSI_T_TMF			0
#define VIRTIO_SCSI_T_TMF_ABORT_TASK		0
#define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET	1
#define VIRTIO_SCSI_T_TMF_CLEAR_ACA		2
#define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET	3
#define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET	4
#define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET	5
#define VIRTIO_SCSI_T_TMF_QUERY_TASK		6
#define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET	7

/* command-specific response values */
#define VIRTIO_SCSI_S_FUNCTION_COMPLETE		0
#define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED	10
#define VIRTIO_SCSI_S_FUNCTION_REJECTED		11

struct pci_vtscsi_ctrl_tmf {
	uint32_t type;
	uint32_t subtype;
	uint8_t lun[8];
	uint64_t id;
	uint8_t response;
} __attribute__((packed));

#define VIRTIO_SCSI_T_AN_QUERY			1
#define VIRTIO_SCSI_EVT_ASYNC_OPERATIONAL_CHANGE 2
#define VIRTIO_SCSI_EVT_ASYNC_POWER_MGMT	4
#define VIRTIO_SCSI_EVT_ASYNC_EXTERNAL_REQUEST	8
#define VIRTIO_SCSI_EVT_ASYNC_MEDIA_CHANGE	16
#define VIRTIO_SCSI_EVT_ASYNC_MULTI_HOST	32
#define VIRTIO_SCSI_EVT_ASYNC_DEVICE_BUSY	64

struct pci_vtscsi_ctrl_an {
	uint32_t type;
	uint8_t lun[8];
	uint32_t event_requested;
	uint32_t event_actual;
	uint8_t response;
} __attribute__((packed));

/* command-specific response values */
#define VIRTIO_SCSI_S_OK			0
#define VIRTIO_SCSI_S_OVERRUN			1
#define VIRTIO_SCSI_S_ABORTED			2
#define VIRTIO_SCSI_S_BAD_TARGET		3
#define VIRTIO_SCSI_S_RESET			4
#define VIRTIO_SCSI_S_BUSY			5
#define VIRTIO_SCSI_S_TRANSPORT_FAILURE		6
#define VIRTIO_SCSI_S_TARGET_FAILURE		7
#define VIRTIO_SCSI_S_NEXUS_FAILURE		8
#define VIRTIO_SCSI_S_FAILURE			9
#define VIRTIO_SCSI_S_INCORRECT_LUN		12

/* task_attr */
#define VIRTIO_SCSI_S_SIMPLE			0
#define VIRTIO_SCSI_S_ORDERED			1
#define VIRTIO_SCSI_S_HEAD			2
#define VIRTIO_SCSI_S_ACA			3

struct pci_vtscsi_event {
	uint32_t event;
	uint8_t lun[8];
	uint32_t reason;
} __attribute__((packed));

struct pci_vtscsi_req_cmd_rd {
	uint8_t lun[8];
	uint64_t id;
	uint8_t task_attr;
	uint8_t prio;
	uint8_t crn;
	uint8_t cdb[];
} __attribute__((packed));

struct pci_vtscsi_req_cmd_wr {
	uint32_t sense_len;
	uint32_t residual;
	uint16_t status_qualifier;
	uint8_t status;
	uint8_t response;
	uint8_t sense[];
} __attribute__((packed));

static void *pci_vtscsi_proc(void *);
static void pci_vtscsi_reset(void *);
static void pci_vtscsi_neg_features(void *, uint64_t);
static int pci_vtscsi_cfgread(void *, int, int, uint32_t *);
static int pci_vtscsi_cfgwrite(void *, int, int, uint32_t);
static inline int pci_vtscsi_get_lun(uint8_t *);
static int pci_vtscsi_control_handle(struct pci_vtscsi_softc *, void *, size_t);
static int pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *,
    struct pci_vtscsi_ctrl_tmf *);
static int pci_vtscsi_an_handle(struct pci_vtscsi_softc *,
    struct pci_vtscsi_ctrl_an *);
static int pci_vtscsi_request_handle(struct pci_vtscsi_queue *, struct iovec *,
    int, struct iovec *, int);
static void pci_vtscsi_controlq_notify(void *, struct vqueue_info *);
static void pci_vtscsi_eventq_notify(void *, struct vqueue_info *);
static void pci_vtscsi_requestq_notify(void *, struct vqueue_info *);
static int pci_vtscsi_init_queue(struct pci_vtscsi_softc *,
    struct pci_vtscsi_queue *, int);
static int pci_vtscsi_init(struct pci_devinst *, nvlist_t *);

static struct virtio_consts vtscsi_vi_consts = {
	.vc_name = "vtscsi",
	.vc_nvq = VTSCSI_MAXQ,
	.vc_cfgsize = sizeof(struct pci_vtscsi_config),
	.vc_reset = pci_vtscsi_reset,
	.vc_cfgread = pci_vtscsi_cfgread,
	.vc_cfgwrite = pci_vtscsi_cfgwrite,
	.vc_apply_features = pci_vtscsi_neg_features,
	.vc_hv_caps = 0,
};

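/*
 * Request queue worker thread: wait for requests enqueued by
 * pci_vtscsi_requestq_notify(), hand each one to CTL via
 * pci_vtscsi_request_handle(), then return the completed descriptor
 * chain to the guest.
 */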
static void *
pci_vtscsi_proc(void *arg)
{
	struct pci_vtscsi_worker *worker = (struct pci_vtscsi_worker *)arg;
	struct pci_vtscsi_queue *q = worker->vsw_queue;
	struct pci_vtscsi_request *req;
	int iolen;

	for (;;) {
		pthread_mutex_lock(&q->vsq_mtx);

		while (STAILQ_EMPTY(&q->vsq_requests)
		    && !worker->vsw_exiting)
			pthread_cond_wait(&q->vsq_cv, &q->vsq_mtx);

		if (worker->vsw_exiting)
			break;

		req = STAILQ_FIRST(&q->vsq_requests);
		STAILQ_REMOVE_HEAD(&q->vsq_requests, vsr_link);

		pthread_mutex_unlock(&q->vsq_mtx);
		iolen = pci_vtscsi_request_handle(q, req->vsr_iov_in,
		    req->vsr_niov_in, req->vsr_iov_out, req->vsr_niov_out);

		pthread_mutex_lock(&q->vsq_qmtx);
		vq_relchain(q->vsq_vq, req->vsr_idx, iolen);
		vq_endchains(q->vsq_vq, 0);
		pthread_mutex_unlock(&q->vsq_qmtx);

		DPRINTF("request <idx=%d> completed", req->vsr_idx);
		free(req);
	}

	pthread_mutex_unlock(&q->vsq_mtx);
	return (NULL);
}

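/*
 * Device reset: reset the generic virtio state and restore the
 * virtio-scsi config space to its power-on defaults.
 */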
static void
pci_vtscsi_reset(void *vsc)
{
	struct pci_vtscsi_softc *sc;

	sc = vsc;

	DPRINTF("device reset requested");
	vi_reset_dev(&sc->vss_vs);

	/* initialize config structure */
	sc->vss_config = (struct pci_vtscsi_config){
		.num_queues = VTSCSI_REQUESTQ,
		/* Leave room for the request and the response. */
		.seg_max = VTSCSI_MAXSEG - 2,
		.max_sectors = 2,
		.cmd_per_lun = 1,
		.event_info_size = sizeof(struct pci_vtscsi_event),
		.sense_size = 96,
		.cdb_size = 32,
		.max_channel = VIRTIO_SCSI_MAX_CHANNEL,
		.max_target = VIRTIO_SCSI_MAX_TARGET,
		.max_lun = VIRTIO_SCSI_MAX_LUN
	};
}

static void
pci_vtscsi_neg_features(void *vsc, uint64_t negotiated_features)
{
	struct pci_vtscsi_softc *sc = vsc;

	sc->vss_features = negotiated_features;
}

static int
pci_vtscsi_cfgread(void *vsc, int offset, int size, uint32_t *retval)
{
	struct pci_vtscsi_softc *sc = vsc;
	void *ptr;

	ptr = (uint8_t *)&sc->vss_config + offset;
	memcpy(retval, ptr, size);
	return (0);
}

static int
pci_vtscsi_cfgwrite(void *vsc __unused, int offset __unused, int size __unused,
    uint32_t val __unused)
{
	return (0);
}

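/*
 * Extract the LUN number from the 8-byte virtio-scsi LUN address:
 * the low 14 bits of bytes 2 and 3.
 */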
static inline int
pci_vtscsi_get_lun(uint8_t *lun)
{

	return (((lun[2] << 8) | lun[3]) & 0x3fff);
}

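/*
 * Dispatch a control queue request by its type field: task management
 * function (TMF) or asynchronous notification query.  Returns the
 * number of response bytes to copy back to the guest.
 */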
static int
pci_vtscsi_control_handle(struct pci_vtscsi_softc *sc, void *buf,
    size_t bufsize)
{
	struct pci_vtscsi_ctrl_tmf *tmf;
	struct pci_vtscsi_ctrl_an *an;
	uint32_t type;

	if (bufsize < sizeof(uint32_t)) {
		WPRINTF("ignoring truncated control request");
		return (0);
	}

	type = *(uint32_t *)buf;

	if (type == VIRTIO_SCSI_T_TMF) {
		if (bufsize != sizeof(*tmf)) {
			WPRINTF("ignoring tmf request with size %zu", bufsize);
			return (0);
		}
		tmf = (struct pci_vtscsi_ctrl_tmf *)buf;
		return (pci_vtscsi_tmf_handle(sc, tmf));
	}

	if (type == VIRTIO_SCSI_T_AN_QUERY) {
		if (bufsize != sizeof(*an)) {
			WPRINTF("ignoring AN request with size %zu", bufsize);
			return (0);
		}
		an = (struct pci_vtscsi_ctrl_an *)buf;
		return (pci_vtscsi_an_handle(sc, an));
	}

	return (0);
}

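/*
 * Translate a virtio-scsi task management request into a CTL task I/O
 * and submit it through the CTL_IO ioctl.  The one-byte response is
 * taken from the resulting task status.
 */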
static int
pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *sc,
    struct pci_vtscsi_ctrl_tmf *tmf)
{
	union ctl_io *io;
	int err;

	io = ctl_scsi_alloc_io(sc->vss_iid);
	ctl_scsi_zero_io(io);

	io->io_hdr.io_type = CTL_IO_TASK;
	io->io_hdr.nexus.initid = sc->vss_iid;
	io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(tmf->lun);
	io->taskio.tag_type = CTL_TAG_SIMPLE;
	io->taskio.tag_num = tmf->id;
	io->io_hdr.flags |= CTL_FLAG_USER_TAG;

	switch (tmf->subtype) {
	case VIRTIO_SCSI_T_TMF_ABORT_TASK:
		io->taskio.task_action = CTL_TASK_ABORT_TASK;
		break;

	case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
		io->taskio.task_action = CTL_TASK_ABORT_TASK_SET;
		break;

	case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
		io->taskio.task_action = CTL_TASK_CLEAR_ACA;
		break;

	case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
		io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET;
		break;

	case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
		io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET;
		break;

	case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
		io->taskio.task_action = CTL_TASK_LUN_RESET;
		break;

	case VIRTIO_SCSI_T_TMF_QUERY_TASK:
		io->taskio.task_action = CTL_TASK_QUERY_TASK;
		break;

	case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
		io->taskio.task_action = CTL_TASK_QUERY_TASK_SET;
		break;
	}

	if (pci_vtscsi_debug) {
		struct sbuf *sb = sbuf_new_auto();
		ctl_io_sbuf(io, sb);
		sbuf_finish(sb);
		DPRINTF("%s", sbuf_data(sb));
		sbuf_delete(sb);
	}

	err = ioctl(sc->vss_ctl_fd, CTL_IO, io);
	if (err != 0)
		WPRINTF("CTL_IO: err=%d (%s)", errno, strerror(errno));

	tmf->response = io->taskio.task_status;
	ctl_scsi_free_io(io);
	return (1);
}

static int
pci_vtscsi_an_handle(struct pci_vtscsi_softc *sc __unused,
    struct pci_vtscsi_ctrl_an *an __unused)
{
	return (0);
}

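/*
 * Translate one virtio-scsi command request into a CTL SCSI I/O:
 * split the descriptor chain into header and data segments, submit
 * the CDB through the CTL_IO ioctl, and fill in the response header
 * (status, residual and sense data).  Returns the number of bytes
 * written to the guest-writable part of the chain.
 */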
static int
pci_vtscsi_request_handle(struct pci_vtscsi_queue *q, struct iovec *iov_in,
    int niov_in, struct iovec *iov_out, int niov_out)
{
	struct pci_vtscsi_softc *sc = q->vsq_sc;
	struct pci_vtscsi_req_cmd_rd *cmd_rd = NULL;
	struct pci_vtscsi_req_cmd_wr *cmd_wr;
	struct iovec data_iov_in[VTSCSI_MAXSEG], data_iov_out[VTSCSI_MAXSEG];
	union ctl_io *io;
	int data_niov_in, data_niov_out;
	void *ext_data_ptr = NULL;
	uint32_t ext_data_len = 0, ext_sg_entries = 0;
	int err, nxferred;

	if (count_iov(iov_out, niov_out) < VTSCSI_OUT_HEADER_LEN(sc)) {
		WPRINTF("ignoring request with insufficient output");
		return (0);
	}
	if (count_iov(iov_in, niov_in) < VTSCSI_IN_HEADER_LEN(sc)) {
		WPRINTF("ignoring request with incomplete header");
		return (0);
	}

	seek_iov(iov_in, niov_in, data_iov_in, &data_niov_in,
	    VTSCSI_IN_HEADER_LEN(sc));
	seek_iov(iov_out, niov_out, data_iov_out, &data_niov_out,
	    VTSCSI_OUT_HEADER_LEN(sc));

	truncate_iov(iov_in, &niov_in, VTSCSI_IN_HEADER_LEN(sc));
	truncate_iov(iov_out, &niov_out, VTSCSI_OUT_HEADER_LEN(sc));
	iov_to_buf(iov_in, niov_in, (void **)&cmd_rd);

	cmd_wr = calloc(1, VTSCSI_OUT_HEADER_LEN(sc));
	io = ctl_scsi_alloc_io(sc->vss_iid);
	ctl_scsi_zero_io(io);

	io->io_hdr.nexus.initid = sc->vss_iid;
	io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(cmd_rd->lun);

	io->io_hdr.io_type = CTL_IO_SCSI;

	if (data_niov_in > 0) {
		ext_data_ptr = (void *)data_iov_in;
		ext_sg_entries = data_niov_in;
		ext_data_len = count_iov(data_iov_in, data_niov_in);
		io->io_hdr.flags |= CTL_FLAG_DATA_OUT;
	} else if (data_niov_out > 0) {
		ext_data_ptr = (void *)data_iov_out;
		ext_sg_entries = data_niov_out;
		ext_data_len = count_iov(data_iov_out, data_niov_out);
		io->io_hdr.flags |= CTL_FLAG_DATA_IN;
	}

	io->scsiio.sense_len = sc->vss_config.sense_size;
	io->scsiio.tag_num = cmd_rd->id;
	io->io_hdr.flags |= CTL_FLAG_USER_TAG;
	switch (cmd_rd->task_attr) {
	case VIRTIO_SCSI_S_ORDERED:
		io->scsiio.tag_type = CTL_TAG_ORDERED;
		break;
	case VIRTIO_SCSI_S_HEAD:
		io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
		break;
	case VIRTIO_SCSI_S_ACA:
		io->scsiio.tag_type = CTL_TAG_ACA;
		break;
	case VIRTIO_SCSI_S_SIMPLE:
	default:
		io->scsiio.tag_type = CTL_TAG_SIMPLE;
		break;
	}
	io->scsiio.ext_sg_entries = ext_sg_entries;
	io->scsiio.ext_data_ptr = ext_data_ptr;
	io->scsiio.ext_data_len = ext_data_len;
	io->scsiio.ext_data_filled = 0;
	io->scsiio.cdb_len = sc->vss_config.cdb_size;
	memcpy(io->scsiio.cdb, cmd_rd->cdb, sc->vss_config.cdb_size);

	if (pci_vtscsi_debug) {
		struct sbuf *sb = sbuf_new_auto();
		ctl_io_sbuf(io, sb);
		sbuf_finish(sb);
		DPRINTF("%s", sbuf_data(sb));
		sbuf_delete(sb);
	}

	err = ioctl(sc->vss_ctl_fd, CTL_IO, io);
	if (err != 0) {
		WPRINTF("CTL_IO: err=%d (%s)", errno, strerror(errno));
		cmd_wr->response = VIRTIO_SCSI_S_FAILURE;
	} else {
		cmd_wr->sense_len = MIN(io->scsiio.sense_len,
		    sc->vss_config.sense_size);
		cmd_wr->residual = ext_data_len - io->scsiio.ext_data_filled;
		cmd_wr->status = io->scsiio.scsi_status;
		cmd_wr->response = VIRTIO_SCSI_S_OK;
		memcpy(&cmd_wr->sense, &io->scsiio.sense_data,
		    cmd_wr->sense_len);
	}

	buf_to_iov(cmd_wr, VTSCSI_OUT_HEADER_LEN(sc), iov_out, niov_out, 0);
	nxferred = VTSCSI_OUT_HEADER_LEN(sc) + io->scsiio.ext_data_filled;
	free(cmd_rd);
	free(cmd_wr);
	ctl_scsi_free_io(io);
	return (nxferred);
}

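/*
 * Control queue notification: process each pending descriptor chain
 * synchronously and copy the response back into the chain.
 */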
static void
pci_vtscsi_controlq_notify(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtscsi_softc *sc;
	struct iovec iov[VTSCSI_MAXSEG];
	struct vi_req req;
	void *buf = NULL;
	size_t bufsize;
	int iolen, n;

	sc = vsc;

	while (vq_has_descs(vq)) {
		n = vq_getchain(vq, iov, VTSCSI_MAXSEG, &req);
		assert(n >= 1 && n <= VTSCSI_MAXSEG);

		bufsize = iov_to_buf(iov, n, &buf);
		iolen = pci_vtscsi_control_handle(sc, buf, bufsize);
		buf_to_iov((uint8_t *)buf + bufsize - iolen, iolen, iov, n,
		    bufsize - iolen);

		/*
		 * Release this chain and handle more
		 */
		vq_relchain(vq, req.idx, iolen);
	}
	vq_endchains(vq, 1);	/* Generate interrupt if appropriate. */
	free(buf);
}

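/*
 * Event queue notification: no events are ever reported, so simply
 * stop further kicks on this queue.
 */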
static void
pci_vtscsi_eventq_notify(void *vsc __unused, struct vqueue_info *vq)
{
	vq_kick_disable(vq);
}

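/*
 * Request queue notification: pull descriptor chains off the queue,
 * wrap each one in a pci_vtscsi_request and hand it to the worker
 * threads for asynchronous processing.
 */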
static void
pci_vtscsi_requestq_notify(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtscsi_softc *sc;
	struct pci_vtscsi_queue *q;
	struct pci_vtscsi_request *req;
	struct iovec iov[VTSCSI_MAXSEG];
	struct vi_req vireq;
	int n;

	sc = vsc;
	q = &sc->vss_queues[vq->vq_num - 2];

	while (vq_has_descs(vq)) {
		n = vq_getchain(vq, iov, VTSCSI_MAXSEG, &vireq);
		assert(n >= 1 && n <= VTSCSI_MAXSEG);

		req = calloc(1, sizeof(struct pci_vtscsi_request));
		req->vsr_idx = vireq.idx;
		req->vsr_queue = q;
		req->vsr_niov_in = vireq.readable;
		req->vsr_niov_out = vireq.writable;
		memcpy(req->vsr_iov_in, iov,
		    req->vsr_niov_in * sizeof(struct iovec));
		memcpy(req->vsr_iov_out, iov + vireq.readable,
		    req->vsr_niov_out * sizeof(struct iovec));

		pthread_mutex_lock(&q->vsq_mtx);
		STAILQ_INSERT_TAIL(&q->vsq_requests, req, vsr_link);
		pthread_cond_signal(&q->vsq_cv);
		pthread_mutex_unlock(&q->vsq_mtx);

		DPRINTF("request <idx=%d> enqueued", vireq.idx);
	}
}

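/*
 * Set up the per-queue state for request queue "num" and spawn its
 * pool of worker threads.
 */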
static int
pci_vtscsi_init_queue(struct pci_vtscsi_softc *sc,
    struct pci_vtscsi_queue *queue, int num)
{
	struct pci_vtscsi_worker *worker;
	char tname[MAXCOMLEN + 1];
	int i;

	queue->vsq_sc = sc;
	queue->vsq_vq = &sc->vss_vq[num + 2];

	pthread_mutex_init(&queue->vsq_mtx, NULL);
	pthread_mutex_init(&queue->vsq_qmtx, NULL);
	pthread_cond_init(&queue->vsq_cv, NULL);
	STAILQ_INIT(&queue->vsq_requests);
	LIST_INIT(&queue->vsq_workers);

	for (i = 0; i < VTSCSI_THR_PER_Q; i++) {
		worker = calloc(1, sizeof(struct pci_vtscsi_worker));
		worker->vsw_queue = queue;

		pthread_create(&worker->vsw_thread, NULL, &pci_vtscsi_proc,
		    (void *)worker);

		snprintf(tname, sizeof(tname), "vtscsi:%d-%d", num, i);
		pthread_set_name_np(worker->vsw_thread, tname);
		LIST_INSERT_HEAD(&queue->vsq_workers, worker, vsw_link);
	}

	return (0);
}

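/*
 * Convert the legacy comma-separated option string into nvlist
 * configuration: the first token is the CTL device path ("dev"),
 * the remainder is parsed as key=value pairs.
 */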
static int
pci_vtscsi_legacy_config(nvlist_t *nvl, const char *opts)
{
	char *cp, *devname;

	if (opts == NULL)
		return (0);

	cp = strchr(opts, ',');
	if (cp == NULL) {
		set_config_value_node(nvl, "dev", opts);
		return (0);
	}
	devname = strndup(opts, cp - opts);
	set_config_value_node(nvl, "dev", devname);
	free(devname);
	return (pci_parse_legacy_config(nvl, cp + 1));
}

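/*
 * Device instance initialization: open the CTL device, link up the
 * generic virtio state, configure the control, event and request
 * queues, and set up PCI config space and interrupts.
 */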
static int
pci_vtscsi_init(struct pci_devinst *pi, nvlist_t *nvl)
{
	struct pci_vtscsi_softc *sc;
	const char *devname, *value;
	int i;

	sc = calloc(1, sizeof(struct pci_vtscsi_softc));
	value = get_config_value_node(nvl, "iid");
	if (value != NULL)
		sc->vss_iid = strtoul(value, NULL, 10);

	value = get_config_value_node(nvl, "bootindex");
	if (value != NULL) {
		if (pci_emul_add_boot_device(pi, atoi(value))) {
			EPRINTLN("Invalid bootindex %d", atoi(value));
			free(sc);
			return (-1);
		}
	}

	devname = get_config_value_node(nvl, "dev");
	if (devname == NULL)
		devname = "/dev/cam/ctl";
	sc->vss_ctl_fd = open(devname, O_RDWR);
	if (sc->vss_ctl_fd < 0) {
		WPRINTF("cannot open %s: %s", devname, strerror(errno));
		free(sc);
		return (1);
	}

	pthread_mutex_init(&sc->vss_mtx, NULL);

	vi_softc_linkup(&sc->vss_vs, &vtscsi_vi_consts, sc, pi, sc->vss_vq);
	sc->vss_vs.vs_mtx = &sc->vss_mtx;

	/* controlq */
	sc->vss_vq[0].vq_qsize = VTSCSI_RINGSZ;
	sc->vss_vq[0].vq_notify = pci_vtscsi_controlq_notify;

	/* eventq */
	sc->vss_vq[1].vq_qsize = VTSCSI_RINGSZ;
	sc->vss_vq[1].vq_notify = pci_vtscsi_eventq_notify;

	/* request queues */
	for (i = 2; i < VTSCSI_MAXQ; i++) {
		sc->vss_vq[i].vq_qsize = VTSCSI_RINGSZ;
		sc->vss_vq[i].vq_notify = pci_vtscsi_requestq_notify;
		pci_vtscsi_init_queue(sc, &sc->vss_queues[i - 2], i - 2);
	}

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_SCSI);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_SCSI);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	if (vi_intr_init(&sc->vss_vs, 1, fbsdrun_virtio_msix()))
		return (1);
	vi_set_io_bar(&sc->vss_vs, 0);

	return (0);
}

static const struct pci_devemu pci_de_vscsi = {
	.pe_emu = "virtio-scsi",
	.pe_init = pci_vtscsi_init,
	.pe_legacy_config = pci_vtscsi_legacy_config,
	.pe_barwrite = vi_pci_write,
	.pe_barread = vi_pci_read
};
PCI_EMUL_SET(pci_de_vscsi);