// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio SCSI HBA driver
 *
 * Copyright IBM Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Paolo Bonzini   <pbonzini@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_devinfo.h>
#include <linux/seqlock.h>

#include "sd.h"

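/*
 * Virtqueue layout: vq 0 is the control queue, vq 1 is the event queue,
 * and request queues start at VIRTIO_SCSI_VQ_BASE.  VIRTIO_SCSI_EVENT_LEN
 * event buffers are kept posted on the event queue at all times, and the
 * mempool sized below guarantees forward progress for TMF allocations in
 * the error handler.
 */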
#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
#define VIRTIO_SCSI_VQ_BASE 2

static unsigned int virtscsi_poll_queues;
module_param(virtscsi_poll_queues, uint, 0644);
MODULE_PARM_DESC(virtscsi_poll_queues,
		 "The number of dedicated virtqueues for polling I/O");

/* Command queue element */
struct virtio_scsi_cmd {
	struct scsi_cmnd *sc;
	struct completion *comp;
	union {
		struct virtio_scsi_cmd_req       cmd;
		struct virtio_scsi_cmd_req_pi    cmd_pi;
		struct virtio_scsi_ctrl_tmf_req  tmf;
		struct virtio_scsi_ctrl_an_req   an;
	} req;
	union {
		struct virtio_scsi_cmd_resp      cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp  an;
		struct virtio_scsi_event         evt;
	} resp;
} ____cacheline_aligned_in_smp;

struct virtio_scsi_event_node {
	struct virtio_scsi *vscsi;
	struct virtio_scsi_event event;
	struct work_struct work;
};

struct virtio_scsi_vq {
	/* Protects vq */
	spinlock_t vq_lock;

	struct virtqueue *vq;
};

/* Driver instance state */
struct virtio_scsi {
	struct virtio_device *vdev;

	/* Get some buffers ready for event vq */
	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

	u32 num_queues;
	int io_queues[HCTX_MAX_TYPES];

	struct hlist_node node;

	/* Protected by event_vq lock */
	bool stop_events;

	struct virtio_scsi_vq ctrl_vq;
	struct virtio_scsi_vq event_vq;
	struct virtio_scsi_vq req_vqs[];
};

static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;

static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
{
	return vdev->priv;
}

static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
	if (resid)
		scsi_set_resid(sc, min(resid, scsi_bufflen(sc)));
}

/*
 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
 *
 * Called with vq_lock held.
 */
static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;
	struct scsi_cmnd *sc = cmd->sc;
	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p response %u status %#02x sense_len %u\n",
		sc, resp->response, resp->status, resp->sense_len);

	sc->result = resp->status;
	virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid));
	switch (resp->response) {
	case VIRTIO_SCSI_S_OK:
		set_host_byte(sc, DID_OK);
		break;
	case VIRTIO_SCSI_S_OVERRUN:
		set_host_byte(sc, DID_ERROR);
		break;
	case VIRTIO_SCSI_S_ABORTED:
		set_host_byte(sc, DID_ABORT);
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_RESET:
		set_host_byte(sc, DID_RESET);
		break;
	case VIRTIO_SCSI_S_BUSY:
		set_host_byte(sc, DID_BUS_BUSY);
		break;
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		break;
	case VIRTIO_SCSI_S_TARGET_FAILURE:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		set_status_byte(sc, SAM_STAT_RESERVATION_CONFLICT);
		break;
	default:
		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
			    resp->response);
		fallthrough;
	case VIRTIO_SCSI_S_FAILURE:
		set_host_byte(sc, DID_ERROR);
		break;
	}

	WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
		VIRTIO_SCSI_SENSE_SIZE);
	if (resp->sense_len) {
		memcpy(sc->sense_buffer, resp->sense,
		       min_t(u32,
			     virtio32_to_cpu(vscsi->vdev, resp->sense_len),
			     VIRTIO_SCSI_SENSE_SIZE));
	}

	scsi_done(sc);
}

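/*
 * Drain a virtqueue and invoke @fn on each completed buffer.  Callbacks
 * are disabled while draining; virtqueue_enable_cb() returns false if
 * more buffers arrived in the meantime, in which case the loop runs
 * again so that no completion is missed.
 */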
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
			     struct virtio_scsi_vq *virtscsi_vq,
			     void (*fn)(struct virtio_scsi *vscsi, void *buf))
{
	void *buf;
	unsigned int len;
	unsigned long flags;
	struct virtqueue *vq = virtscsi_vq->vq;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(vscsi, buf);

	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}

static void virtscsi_req_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int index = vq->index - VIRTIO_SCSI_VQ_BASE;
	struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];

	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
}

static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
{
	int i, num_vqs;

	num_vqs = vscsi->num_queues;
	for (i = 0; i < num_vqs; i++)
		virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
				 virtscsi_complete_cmd);
}

static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;

	if (cmd->comp)
		complete(cmd->comp);
}

static void virtscsi_ctrl_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
}

static void virtscsi_handle_event(struct work_struct *work);

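/*
 * Event buffers are posted to the event queue as device-writable buffers.
 * When one completes, virtscsi_complete_event() queues
 * virtscsi_handle_event() on a workqueue, and the handler re-posts the
 * buffer once the event has been processed.
 */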
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
			       struct virtio_scsi_event_node *event_node)
{
	int err;
	struct scatterlist sg;
	unsigned long flags;

	INIT_WORK(&event_node->work, virtscsi_handle_event);
	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));

	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

	err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
				  GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vscsi->event_vq.vq);

	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);

	return err;
}

static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
{
	int i;

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
		vscsi->event_list[i].vscsi = vscsi;
		virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
	}

	return 0;
}

static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
	int i;

	/* Stop scheduling work before calling cancel_work_sync.  */
	spin_lock_irq(&vscsi->event_vq.vq_lock);
	vscsi->stop_events = true;
	spin_unlock_irq(&vscsi->event_vq.vq_lock);

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
		cancel_work_sync(&vscsi->event_list[i].work);
}

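/*
 * Hotplug and hotunplug events carry the affected address in the 8-byte
 * virtio-scsi LUN field: byte 1 is the target and bytes 2-3 encode the
 * LUN, matching the format built by virtio_scsi_init_hdr() further down.
 */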
static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
					    struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];

	switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
	case VIRTIO_SCSI_EVT_RESET_RESCAN:
		if (lun == 0) {
			scsi_scan_target(&shost->shost_gendev, 0, target,
					 SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		} else {
			scsi_add_device(shost, 0, target, lun);
		}
		break;
	case VIRTIO_SCSI_EVT_RESET_REMOVED:
		sdev = scsi_device_lookup(shost, 0, target, lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			pr_err("SCSI device %d 0 %d %d not found\n",
				shost->host_no, target, lun);
		}
		break;
	default:
		pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
	}
}

static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
					 struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];
	u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255;
	u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8;

	sdev = scsi_device_lookup(shost, 0, target, lun);
	if (!sdev) {
		pr_err("SCSI device %d 0 %d %d not found\n",
			shost->host_no, target, lun);
		return;
	}

	/*
	 * Handle "Parameters changed", "Mode parameters changed", and
	 * "Capacity data has changed".
	 */
	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
		scsi_rescan_device(sdev);

	scsi_device_put(sdev);
}

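/*
 * After a VIRTIO_SCSI_T_EVENTS_MISSED notification the event list can no
 * longer be trusted, so probe every known device with an INQUIRY and
 * remove those that are gone: a nonzero peripheral qualifier or a
 * DID_BAD_TARGET reply both indicate a hot-unplugged LUN.
 */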
static int virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	int result, inquiry_len, inq_result_len = 256;
	char *inq_result = kmalloc(inq_result_len, GFP_KERNEL);

	if (!inq_result)
		return -ENOMEM;

	shost_for_each_device(sdev, shost) {
		inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;

		memset(scsi_cmd, 0, sizeof(scsi_cmd));
		scsi_cmd[0] = INQUIRY;
		scsi_cmd[4] = (unsigned char) inquiry_len;

		memset(inq_result, 0, inq_result_len);

		result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
					  inq_result, inquiry_len,
					  SD_TIMEOUT, SD_MAX_RETRIES, NULL);

		if (result == 0 && inq_result[0] >> 5) {
			/* PQ indicates the LUN is not attached */
			scsi_remove_device(sdev);
		} else if (result > 0 && host_byte(result) == DID_BAD_TARGET) {
			/*
			 * If all LUNs of a virtio-scsi device are unplugged
			 * it will respond with BAD TARGET on any INQUIRY
			 * command.
			 * Remove the device in this case as well.
			 */
			scsi_remove_device(sdev);
		}
	}

	kfree(inq_result);
	return 0;
}

static void virtscsi_handle_event(struct work_struct *work)
{
	struct virtio_scsi_event_node *event_node =
		container_of(work, struct virtio_scsi_event_node, work);
	struct virtio_scsi *vscsi = event_node->vscsi;
	struct virtio_scsi_event *event = &event_node->event;

	if (event->event &
	    cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
		int ret;

		event->event &= ~cpu_to_virtio32(vscsi->vdev,
						 VIRTIO_SCSI_T_EVENTS_MISSED);
		ret = virtscsi_rescan_hotunplug(vscsi);
		if (ret)
			return;
		scsi_scan_host(virtio_scsi_host(vscsi->vdev));
	}

	switch (virtio32_to_cpu(vscsi->vdev, event->event)) {
	case VIRTIO_SCSI_T_NO_EVENT:
		break;
	case VIRTIO_SCSI_T_TRANSPORT_RESET:
		virtscsi_handle_transport_reset(vscsi, event);
		break;
	case VIRTIO_SCSI_T_PARAM_CHANGE:
		virtscsi_handle_param_change(vscsi, event);
		break;
	default:
		pr_err("Unsupported virtio scsi event %x\n", event->event);
	}
	virtscsi_kick_event(vscsi, event_node);
}

static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_event_node *event_node = buf;

	if (!vscsi->stop_events)
		queue_work(system_freezable_wq, &event_node->work);
}

static void virtscsi_event_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
}

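/*
 * Build the scatterlist for a single request.  Virtio requires all
 * device-readable buffers to precede device-writable ones, so the order
 * is: request header, data-out payload (with any WRITE protection SGLs
 * first), then response header and data-in payload (with any READ
 * protection SGLs first).  At most six entries are needed, matching the
 * size of sgs[].
 */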
static int __virtscsi_add_cmd(struct virtqueue *vq,
			    struct virtio_scsi_cmd *cmd,
			    size_t req_size, size_t resp_size)
{
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sgs[6], req, resp;
	struct sg_table *out, *in;
	unsigned out_num = 0, in_num = 0;

	out = in = NULL;

	if (sc && sc->sc_data_direction != DMA_NONE) {
		if (sc->sc_data_direction != DMA_FROM_DEVICE)
			out = &sc->sdb.table;
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			in = &sc->sdb.table;
	}

	/* Request header.  */
	sg_init_one(&req, &cmd->req, req_size);
	sgs[out_num++] = &req;

	/* Data-out buffer.  */
	if (out) {
		/* Place WRITE protection SGLs before Data OUT payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num++] = scsi_prot_sglist(sc);
		sgs[out_num++] = out->sgl;
	}

	/* Response header.  */
	sg_init_one(&resp, &cmd->resp, resp_size);
	sgs[out_num + in_num++] = &resp;

	/* Data-in buffer */
	if (in) {
		/* Place READ protection SGLs before Data IN payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num + in_num++] = scsi_prot_sglist(sc);
		sgs[out_num + in_num++] = in->sgl;
	}

	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
}

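/*
 * virtqueue_kick_prepare() only needs the vq lock; the notify itself can
 * be a slow exit to the hypervisor, so it is issued after the lock has
 * been dropped.
 */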
static void virtscsi_kick_vq(struct virtio_scsi_vq *vq)
{
	bool needs_kick;
	unsigned long flags;

	spin_lock_irqsave(&vq->vq_lock, flags);
	needs_kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
}

/**
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it
 * @vq		: the struct virtqueue we're talking about
 * @cmd		: command structure
 * @req_size	: size of the request buffer
 * @resp_size	: size of the response buffer
 * @kick	: whether to kick the virtqueue immediately
 */
static int virtscsi_add_cmd(struct virtio_scsi_vq *vq,
			     struct virtio_scsi_cmd *cmd,
			     size_t req_size, size_t resp_size,
			     bool kick)
{
	unsigned long flags;
	int err;
	bool needs_kick = false;

	spin_lock_irqsave(&vq->vq_lock, flags);
	err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
	if (!err && kick)
		needs_kick = virtqueue_kick_prepare(vq->vq);

	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
	return err;
}

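/*
 * Fill the 8-byte virtio-scsi LUN field: byte 0 is fixed at 1, byte 1 is
 * the target, and bytes 2-3 carry the LUN in SAM flat addressing (hence
 * the 0x40 flag in the high byte).  The request tag is simply the
 * scsi_cmnd pointer, which is unique per in-flight command.
 */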
static void virtio_scsi_init_hdr(struct virtio_device *vdev,
				 struct virtio_scsi_cmd_req *cmd,
				 struct scsi_cmnd *sc)
{
	cmd->lun[0] = 1;
	cmd->lun[1] = sc->device->id;
	cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
	cmd->lun[3] = sc->device->lun & 0xff;
	cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc);
	cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
	cmd->prio = 0;
	cmd->crn = 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
				    struct virtio_scsi_cmd_req_pi *cmd_pi,
				    struct scsi_cmnd *sc)
{
	struct request *rq = scsi_cmd_to_rq(sc);
	struct blk_integrity *bi;

	virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);

	if (!rq || !scsi_prot_sg_count(sc))
		return;

	bi = blk_get_integrity(rq->q->disk);

	if (sc->sc_data_direction == DMA_TO_DEVICE)
		cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
						      bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
	else if (sc->sc_data_direction == DMA_FROM_DEVICE)
		cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
						     bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
}
#endif

static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
						  struct scsi_cmnd *sc)
{
	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
	u16 hwq = blk_mq_unique_tag_to_hwq(tag);

	return &vscsi->req_vqs[hwq];
}

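/*
 * Fast path: the per-command state lives in the scsi_cmnd private area
 * (see .cmd_size in the host template), the request queue is chosen from
 * the blk-mq hardware queue, and the virtqueue is only kicked for the
 * last request in a batch; earlier requests rely on
 * virtscsi_commit_rqs().
 */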
static int virtscsi_queuecommand(struct Scsi_Host *shost,
				 struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
	bool kick;
	unsigned long flags;
	int req_size;
	int ret;

	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

	/* TODO: check feature bit and fail if unsupported?  */
	BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

	cmd->sc = sc;

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
		virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
		memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd_pi);
	} else
#endif
	{
		virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
		memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd);
	}

	kick = (sc->flags & SCMD_LAST) != 0;
	ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick);
	if (ret == -EIO) {
		cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
		spin_lock_irqsave(&req_vq->vq_lock, flags);
		virtscsi_complete_cmd(vscsi, cmd);
		spin_unlock_irqrestore(&req_vq->vq_lock, flags);
	} else if (ret != 0) {
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	return 0;
}

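/*
 * Send a task management function over the control queue and wait for
 * its completion.  TMFs run from the error handler, so the callers
 * allocate the command from a mempool with GFP_NOIO to guarantee forward
 * progress under memory pressure.
 */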
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret = FAILED;

	cmd->comp = &comp;
	if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd,
			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0)
		goto out;

	wait_for_completion(&comp);
	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
		ret = SUCCESS;

	/*
	 * The spec guarantees that all requests related to the TMF have
	 * been completed, but the callback might not have run yet if
	 * we're using independent interrupts (e.g. MSI).  Poll the
	 * virtqueues once.
	 *
	 * In the abort case, scsi_done() will do nothing, because the
	 * command timed out and hence SCMD_STATE_COMPLETE has been set.
	 */
	virtscsi_poll_requests(vscsi);

out:
	mempool_free(cmd, virtscsi_cmd_pool);
	return ret;
}

static int virtscsi_device_reset(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	sdev_printk(KERN_INFO, sc->device, "device reset\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = cpu_to_virtio32(vscsi->vdev,
					     VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET),
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
	};
	return virtscsi_tmf(vscsi, cmd);
}

static int virtscsi_device_alloc(struct scsi_device *sdevice)
{
	/*
	 * Passed through SCSI targets (e.g. with qemu's 'scsi-block')
	 * may have transfer limits which come from the host SCSI
	 * controller or something on the host side other than the
	 * target itself.
	 *
	 * To make this work properly, the hypervisor can adjust the
	 * target's VPD information to advertise these limits.  But
	 * for that to work, the guest has to look at the VPD pages,
	 * which we won't do by default if it is an SPC-2 device, even
	 * if it does actually support it.
	 *
	 * So, set the blist to always try to read the VPD pages.
	 */
	sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;

	return 0;
}

/**
 * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
 * @sdev:	Virtscsi target whose queue depth to change
 * @qdepth:	New queue depth
 */
static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth = shost->cmd_per_lun;

	return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
}

static int virtscsi_abort(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	scmd_printk(KERN_INFO, sc, "abort\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc),
	};
	return virtscsi_tmf(vscsi, cmd);
}

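/*
 * Map blk-mq hardware queues to request virtqueues.  The offset of 2
 * passed to blk_mq_map_hw_queues() skips the control and event queue
 * vectors, matching the .pre_vectors = 2 irq_affinity descriptor used in
 * virtscsi_init().
 */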
static void virtscsi_map_queues(struct Scsi_Host *shost)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	int i, qoff;

	for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = vscsi->io_queues[i];
		map->queue_offset = qoff;
		qoff += map->nr_queues;

		if (map->nr_queues == 0)
			continue;

		/*
		 * Regular queues have interrupts and hence CPU affinity is
		 * defined by the core virtio code, but polling queues have
		 * no interrupts so we let the block layer assign CPU affinity.
		 */
		if (i == HCTX_TYPE_POLL)
			blk_mq_map_queues(map);
		else
			blk_mq_map_hw_queues(map, &vscsi->vdev->dev, 2);
	}
}

static int virtscsi_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	struct virtio_scsi_vq *virtscsi_vq = &vscsi->req_vqs[queue_num];
	unsigned long flags;
	unsigned int len;
	int found = 0;
	void *buf;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);

	while ((buf = virtqueue_get_buf(virtscsi_vq->vq, &len)) != NULL) {
		virtscsi_complete_cmd(vscsi, buf);
		found++;
	}

	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);

	return found;
}

static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
{
	struct virtio_scsi *vscsi = shost_priv(shost);

	virtscsi_kick_vq(&vscsi->req_vqs[hwq]);
}

/*
 * The host guarantees to respond to each command, although I/O
 * latencies might be higher than on bare metal.  Reset the timer
 * unconditionally to give the host a chance to perform EH.
 */
static enum scsi_timeout_action virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
{
	return SCSI_EH_RESET_TIMER;
}

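/*
 * Note that .cmd_size makes the SCSI core embed a struct virtio_scsi_cmd
 * in every scsi_cmnd, so no allocation happens in the I/O path; only
 * TMFs fall back to the mempool.
 */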
static const struct scsi_host_template virtscsi_host_template = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.this_id = -1,
	.cmd_size = sizeof(struct virtio_scsi_cmd),
	.queuecommand = virtscsi_queuecommand,
	.mq_poll = virtscsi_mq_poll,
	.commit_rqs = virtscsi_commit_rqs,
	.change_queue_depth = virtscsi_change_queue_depth,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,
	.eh_timed_out = virtscsi_eh_timed_out,
	.sdev_init = virtscsi_device_alloc,

	.dma_boundary = UINT_MAX,
	.map_queues = virtscsi_map_queues,
	.track_queue_depth = 1,
};

#define virtscsi_config_get(vdev, fld) \
	({ \
		__virtio_native_type(struct virtio_scsi_config, fld) __val; \
		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
		__val; \
	})

#define virtscsi_config_set(vdev, fld, val) \
	do { \
		__virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
	} while (0)

static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
			     struct virtqueue *vq)
{
	spin_lock_init(&virtscsi_vq->vq_lock);
	virtscsi_vq->vq = vq;
}

static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
	/* Stop all the virtqueues. */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
}

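/*
 * Allocate the control and event queues plus one request queue per
 * blk-mq hardware queue.  The last virtscsi_poll_queues request queues
 * get no callback: they are serviced exclusively by virtscsi_mq_poll().
 */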
static int virtscsi_init(struct virtio_device *vdev,
			 struct virtio_scsi *vscsi)
{
	int err;
	u32 i;
	u32 num_vqs, num_poll_vqs, num_req_vqs;
	struct virtqueue_info *vqs_info;
	struct virtqueue **vqs;
	struct irq_affinity desc = { .pre_vectors = 2 };

	num_req_vqs = vscsi->num_queues;
	num_vqs = num_req_vqs + VIRTIO_SCSI_VQ_BASE;
	vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
	vqs_info = kcalloc(num_vqs, sizeof(*vqs_info), GFP_KERNEL);

	if (!vqs || !vqs_info) {
		err = -ENOMEM;
		goto out;
	}

	num_poll_vqs = min_t(unsigned int, virtscsi_poll_queues,
			     num_req_vqs - 1);
	vscsi->io_queues[HCTX_TYPE_DEFAULT] = num_req_vqs - num_poll_vqs;
	vscsi->io_queues[HCTX_TYPE_READ] = 0;
	vscsi->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;

	dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
		 vscsi->io_queues[HCTX_TYPE_DEFAULT],
		 vscsi->io_queues[HCTX_TYPE_READ],
		 vscsi->io_queues[HCTX_TYPE_POLL]);

	vqs_info[0].callback = virtscsi_ctrl_done;
	vqs_info[0].name = "control";
	vqs_info[1].callback = virtscsi_event_done;
	vqs_info[1].name = "event";
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs - num_poll_vqs; i++) {
		vqs_info[i].callback = virtscsi_req_done;
		vqs_info[i].name = "request";
	}

	for (; i < num_vqs; i++)
		vqs_info[i].name = "request_poll";

	/* Discover virtqueues and write information to configuration.  */
	err = virtio_find_vqs(vdev, num_vqs, vqs, vqs_info, &desc);
	if (err)
		goto out;

	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
		virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
				 vqs[i]);

	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);

	err = 0;

out:
	kfree(vqs_info);
	kfree(vqs);
	if (err)
		virtscsi_remove_vqs(vdev);
	return err;
}

static int virtscsi_probe(struct virtio_device *vdev)
{
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	int err;
	u32 sg_elems, num_targets;
	u32 cmd_per_lun;
	u32 num_queues;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	/* We need to know how many queues before we allocate. */
	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
	num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);

	num_targets = virtscsi_config_get(vdev, max_target) + 1;

	shost = scsi_host_alloc(&virtscsi_host_template,
				struct_size(vscsi, req_vqs, num_queues));
	if (!shost)
		return -ENOMEM;

	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
	shost->sg_tablesize = sg_elems;
	shost->nr_maps = 1;
	vscsi = shost_priv(shost);
	vscsi->vdev = vdev;
	vscsi->num_queues = num_queues;
	vdev->priv = shost;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		goto virtscsi_init_failed;

	if (vscsi->io_queues[HCTX_TYPE_POLL])
		shost->nr_maps = HCTX_TYPE_POLL + 1;

	shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);

	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;

	/*
	 * LUNs > 256 are reported with format 1, so they go in the range
	 * 16640-32767.
	 */
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
	shost->max_id = num_targets;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	shost->nr_hw_queues = num_queues;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
		int host_prot;

		host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
			    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
			    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

		scsi_host_set_prot(shost, host_prot);
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
	}
#endif

	err = scsi_add_host(shost, &vdev->dev);
	if (err)
		goto scsi_add_host_failed;

	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	scsi_scan_host(shost);
	return 0;

scsi_add_host_failed:
	vdev->config->del_vqs(vdev);
virtscsi_init_failed:
	scsi_host_put(shost);
	return err;
}

static void virtscsi_remove(struct virtio_device *vdev)
{
	struct Scsi_Host *shost = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(shost);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_cancel_event_work(vscsi);

	scsi_remove_host(shost);
	virtscsi_remove_vqs(vdev);
	scsi_host_put(shost);
}

#ifdef CONFIG_PM_SLEEP
static int virtscsi_freeze(struct virtio_device *vdev)
{
	virtscsi_remove_vqs(vdev);
	return 0;
}

static int virtscsi_restore(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int err;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		return err;

	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	return err;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_SCSI_F_HOTPLUG,
	VIRTIO_SCSI_F_CHANGE,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	VIRTIO_SCSI_F_T10_PI,
#endif
};

static struct virtio_driver virtio_scsi_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = virtscsi_probe,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
#endif
	.remove = virtscsi_remove,
};

static int __init virtio_scsi_init(void)
{
	int ret = -ENOMEM;

	virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
	if (!virtscsi_cmd_cache) {
		pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
		goto error;
	}

	virtscsi_cmd_pool =
		mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
					 virtscsi_cmd_cache);
	if (!virtscsi_cmd_pool) {
		pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
		goto error;
	}
	ret = register_virtio_driver(&virtio_scsi_driver);
	if (ret < 0)
		goto error;

	return 0;

error:
	mempool_destroy(virtscsi_cmd_pool);
	virtscsi_cmd_pool = NULL;
	kmem_cache_destroy(virtscsi_cmd_cache);
	virtscsi_cmd_cache = NULL;
	return ret;
}

static void __exit virtio_scsi_fini(void)
{
	unregister_virtio_driver(&virtio_scsi_driver);
	mempool_destroy(virtscsi_cmd_pool);
	kmem_cache_destroy(virtscsi_cmd_cache);
}
module_init(virtio_scsi_init);
module_exit(virtio_scsi_fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");