// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio SCSI HBA driver
 *
 * Copyright IBM Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Paolo Bonzini   <pbonzini@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_devinfo.h>
#include <linux/seqlock.h>
#include <linux/blk-mq-virtio.h>

#include "sd.h"

#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
#define VIRTIO_SCSI_VQ_BASE 2

static unsigned int virtscsi_poll_queues;
module_param(virtscsi_poll_queues, uint, 0644);
MODULE_PARM_DESC(virtscsi_poll_queues,
		 "The number of dedicated virtqueues for polling I/O");

/* Command queue element */
struct virtio_scsi_cmd {
	struct scsi_cmnd *sc;
	struct completion *comp;
	union {
		struct virtio_scsi_cmd_req       cmd;
		struct virtio_scsi_cmd_req_pi    cmd_pi;
		struct virtio_scsi_ctrl_tmf_req  tmf;
		struct virtio_scsi_ctrl_an_req   an;
	} req;
	union {
		struct virtio_scsi_cmd_resp      cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp  an;
		struct virtio_scsi_event         evt;
	} resp;
} ____cacheline_aligned_in_smp;

struct virtio_scsi_event_node {
	struct virtio_scsi *vscsi;
	struct virtio_scsi_event event;
	struct work_struct work;
};

struct virtio_scsi_vq {
	/* Protects vq */
	spinlock_t vq_lock;

	struct virtqueue *vq;
};

/* Driver instance state */
struct virtio_scsi {
	struct virtio_device *vdev;

	/* Get some buffers ready for event vq */
	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

	u32 num_queues;
	int io_queues[HCTX_MAX_TYPES];

	struct hlist_node node;

	/* Protected by event_vq lock */
	bool stop_events;

	struct virtio_scsi_vq ctrl_vq;
	struct virtio_scsi_vq event_vq;
	struct virtio_scsi_vq req_vqs[];
};

static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;

static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
{
	return vdev->priv;
}

static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
	if (resid)
		scsi_set_resid(sc, min(resid, scsi_bufflen(sc)));
}

/*
 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
 *
 * Called with vq_lock held.
 */
static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;
	struct scsi_cmnd *sc = cmd->sc;
	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p response %u status %#02x sense_len %u\n",
		sc, resp->response, resp->status, resp->sense_len);

	sc->result = resp->status;
	virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid));
	switch (resp->response) {
	case VIRTIO_SCSI_S_OK:
		set_host_byte(sc, DID_OK);
		break;
	case VIRTIO_SCSI_S_OVERRUN:
		set_host_byte(sc, DID_ERROR);
		break;
	case VIRTIO_SCSI_S_ABORTED:
		set_host_byte(sc, DID_ABORT);
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_RESET:
		set_host_byte(sc, DID_RESET);
		break;
	case VIRTIO_SCSI_S_BUSY:
		set_host_byte(sc, DID_BUS_BUSY);
		break;
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		break;
	case VIRTIO_SCSI_S_TARGET_FAILURE:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		set_status_byte(sc, SAM_STAT_RESERVATION_CONFLICT);
		break;
	default:
		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
			    resp->response);
		fallthrough;
	case VIRTIO_SCSI_S_FAILURE:
		set_host_byte(sc, DID_ERROR);
		break;
	}

	WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
		VIRTIO_SCSI_SENSE_SIZE);
	if (resp->sense_len) {
		memcpy(sc->sense_buffer, resp->sense,
		       min_t(u32,
			     virtio32_to_cpu(vscsi->vdev, resp->sense_len),
			     VIRTIO_SCSI_SENSE_SIZE));
	}

	scsi_done(sc);
}

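/*
 * Drain a virtqueue under its lock.  The disable_cb/get_buf/enable_cb
 * loop below is the standard virtio completion pattern:
 * virtqueue_enable_cb() returns false if more buffers arrived while
 * callbacks were disabled, in which case we go around again instead of
 * missing a completion.
 */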
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
			     struct virtio_scsi_vq *virtscsi_vq,
			     void (*fn)(struct virtio_scsi *vscsi, void *buf))
{
	void *buf;
	unsigned int len;
	unsigned long flags;
	struct virtqueue *vq = virtscsi_vq->vq;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(vscsi, buf);

		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}

static void virtscsi_req_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int index = vq->index - VIRTIO_SCSI_VQ_BASE;
	struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];

	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
}

static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
{
	int i, num_vqs;

	num_vqs = vscsi->num_queues;
	for (i = 0; i < num_vqs; i++)
		virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
				 virtscsi_complete_cmd);
}

static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;

	if (cmd->comp)
		complete(cmd->comp);
}

static void virtscsi_ctrl_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
}

static void virtscsi_handle_event(struct work_struct *work);

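/*
 * The event virtqueue is kept populated with VIRTIO_SCSI_EVENT_LEN
 * buffers: they are posted once at probe (and restore) time, and each
 * one is re-posted from the event work handler after it has been
 * consumed, so the device always has somewhere to put hotplug and
 * parameter-change events.
 */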
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
			       struct virtio_scsi_event_node *event_node)
{
	int err;
	struct scatterlist sg;
	unsigned long flags;

	INIT_WORK(&event_node->work, virtscsi_handle_event);
	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));

	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

	err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
				  GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vscsi->event_vq.vq);

	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);

	return err;
}

static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
{
	int i;

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
		vscsi->event_list[i].vscsi = vscsi;
		virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
	}

	return 0;
}

static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
	int i;

	/* Stop scheduling work before calling cancel_work_sync.  */
	spin_lock_irq(&vscsi->event_vq.vq_lock);
	vscsi->stop_events = true;
	spin_unlock_irq(&vscsi->event_vq.vq_lock);

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
		cancel_work_sync(&vscsi->event_list[i].work);
}

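/*
 * Per the virtio-scsi specification, the event lun field is formatted
 * like the LUN in requests: byte 1 is the target and bytes 2-3 carry
 * the LUN, which is what the handlers below decode.
 */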
static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
					    struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];

	switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
	case VIRTIO_SCSI_EVT_RESET_RESCAN:
		if (lun == 0) {
			scsi_scan_target(&shost->shost_gendev, 0, target,
					 SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		} else {
			scsi_add_device(shost, 0, target, lun);
		}
		break;
	case VIRTIO_SCSI_EVT_RESET_REMOVED:
		sdev = scsi_device_lookup(shost, 0, target, lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			pr_err("SCSI device %d 0 %d %d not found\n",
				shost->host_no, target, lun);
		}
		break;
	default:
		pr_info("Unsupported virtio scsi event reason %x\n",
			virtio32_to_cpu(vscsi->vdev, event->reason));
	}
}

static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
					 struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];
	u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255;
	u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8;

	sdev = scsi_device_lookup(shost, 0, target, lun);
	if (!sdev) {
		pr_err("SCSI device %d 0 %d %d not found\n",
			shost->host_no, target, lun);
		return;
	}

	/*
	 * Handle "Parameters changed", "Mode parameters changed", and
	 * "Capacity data has changed".
	 */
	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
		scsi_rescan_device(sdev);

	scsi_device_put(sdev);
}

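/*
 * Probe every known device with a bare INQUIRY.  A non-zero peripheral
 * qualifier (the top three bits of byte 0 of the INQUIRY data) means
 * the LUN is no longer attached, so the corresponding scsi_device can
 * be removed.
 */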
static int virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	int result, inquiry_len, inq_result_len = 256;
	char *inq_result = kmalloc(inq_result_len, GFP_KERNEL);

	if (!inq_result)
		return -ENOMEM;

	shost_for_each_device(sdev, shost) {
		inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;

		memset(scsi_cmd, 0, sizeof(scsi_cmd));
		scsi_cmd[0] = INQUIRY;
		scsi_cmd[4] = (unsigned char) inquiry_len;

		memset(inq_result, 0, inq_result_len);

		result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
					  inq_result, inquiry_len,
					  SD_TIMEOUT, SD_MAX_RETRIES, NULL);

		if (result == 0 && inq_result[0] >> 5) {
			/* PQ indicates the LUN is not attached */
			scsi_remove_device(sdev);
		} else if (result > 0 && host_byte(result) == DID_BAD_TARGET) {
			/*
			 * If all LUNs of a virtio-scsi device are unplugged
			 * it will respond with BAD TARGET on any INQUIRY
			 * command.
			 * Remove the device in this case as well.
			 */
			scsi_remove_device(sdev);
		}
	}

	kfree(inq_result);
	return 0;
}

static void virtscsi_handle_event(struct work_struct *work)
{
	struct virtio_scsi_event_node *event_node =
		container_of(work, struct virtio_scsi_event_node, work);
	struct virtio_scsi *vscsi = event_node->vscsi;
	struct virtio_scsi_event *event = &event_node->event;

	if (event->event &
	    cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
		int ret;

		event->event &= ~cpu_to_virtio32(vscsi->vdev,
						   VIRTIO_SCSI_T_EVENTS_MISSED);
		ret = virtscsi_rescan_hotunplug(vscsi);
		if (ret)
			return;
		scsi_scan_host(virtio_scsi_host(vscsi->vdev));
	}

	switch (virtio32_to_cpu(vscsi->vdev, event->event)) {
	case VIRTIO_SCSI_T_NO_EVENT:
		break;
	case VIRTIO_SCSI_T_TRANSPORT_RESET:
		virtscsi_handle_transport_reset(vscsi, event);
		break;
	case VIRTIO_SCSI_T_PARAM_CHANGE:
		virtscsi_handle_param_change(vscsi, event);
		break;
	default:
		pr_err("Unsupported virtio scsi event %x\n",
		       virtio32_to_cpu(vscsi->vdev, event->event));
	}
	virtscsi_kick_event(vscsi, event_node);
}

static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_event_node *event_node = buf;

	if (!vscsi->stop_events)
		queue_work(system_freezable_wq, &event_node->work);
}

static void virtscsi_event_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
}

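/*
 * sgs[] needs at most six entries: the request header, an optional
 * WRITE protection SGL and the data-out payload on the out side, then
 * the response header, an optional READ protection SGL and the data-in
 * payload on the in side.
 */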
static int __virtscsi_add_cmd(struct virtqueue *vq,
			    struct virtio_scsi_cmd *cmd,
			    size_t req_size, size_t resp_size)
{
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sgs[6], req, resp;
	struct sg_table *out, *in;
	unsigned out_num = 0, in_num = 0;

	out = in = NULL;

	if (sc && sc->sc_data_direction != DMA_NONE) {
		if (sc->sc_data_direction != DMA_FROM_DEVICE)
			out = &sc->sdb.table;
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			in = &sc->sdb.table;
	}

	/* Request header.  */
	sg_init_one(&req, &cmd->req, req_size);
	sgs[out_num++] = &req;

	/* Data-out buffer.  */
	if (out) {
		/* Place WRITE protection SGLs before Data OUT payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num++] = scsi_prot_sglist(sc);
		sgs[out_num++] = out->sgl;
	}

	/* Response header.  */
	sg_init_one(&resp, &cmd->resp, resp_size);
	sgs[out_num + in_num++] = &resp;

	/* Data-in buffer */
	if (in) {
		/* Place READ protection SGLs before Data IN payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num + in_num++] = scsi_prot_sglist(sc);
		sgs[out_num + in_num++] = in->sgl;
	}

	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
}

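/*
 * Only virtqueue_kick_prepare() needs the lock; the actual notification
 * happens outside it so that a (possibly trapping) write to the device
 * does not extend the vq_lock hold time.
 */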
static void virtscsi_kick_vq(struct virtio_scsi_vq *vq)
{
	bool needs_kick;
	unsigned long flags;

	spin_lock_irqsave(&vq->vq_lock, flags);
	needs_kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
}

/**
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it
 * @vq		: the struct virtqueue we're talking about
 * @cmd		: command structure
 * @req_size	: size of the request buffer
 * @resp_size	: size of the response buffer
 * @kick	: whether to kick the virtqueue immediately
 */
static int virtscsi_add_cmd(struct virtio_scsi_vq *vq,
			     struct virtio_scsi_cmd *cmd,
			     size_t req_size, size_t resp_size,
			     bool kick)
{
	unsigned long flags;
	int err;
	bool needs_kick = false;

	spin_lock_irqsave(&vq->vq_lock, flags);
	err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
	if (!err && kick)
		needs_kick = virtqueue_kick_prepare(vq->vq);

	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
	return err;
}

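/*
 * The virtio-scsi LUN field uses SAM flat-space addressing: byte 0 is
 * fixed at 1, byte 1 is the target and bytes 2-3 encode the LUN with
 * 0x40 in the top bits of byte 2 (address method 01b).  For example,
 * target 2, LUN 5 becomes { 1, 2, 0x40, 0x05 }.
 */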
static void virtio_scsi_init_hdr(struct virtio_device *vdev,
				 struct virtio_scsi_cmd_req *cmd,
				 struct scsi_cmnd *sc)
{
	cmd->lun[0] = 1;
	cmd->lun[1] = sc->device->id;
	cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
	cmd->lun[3] = sc->device->lun & 0xff;
	cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc);
	cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
	cmd->prio = 0;
	cmd->crn = 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
				    struct virtio_scsi_cmd_req_pi *cmd_pi,
				    struct scsi_cmnd *sc)
{
	struct request *rq = scsi_cmd_to_rq(sc);
	struct blk_integrity *bi;

	virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);

	if (!rq || !scsi_prot_sg_count(sc))
		return;

	bi = blk_get_integrity(rq->q->disk);

	if (sc->sc_data_direction == DMA_TO_DEVICE)
		cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
						      bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
	else if (sc->sc_data_direction == DMA_FROM_DEVICE)
		cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
						     bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
}
#endif

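/*
 * blk_mq_unique_tag() encodes the hardware queue index in the upper
 * half of the tag, so extracting it maps the command straight to the
 * request virtqueue that blk-mq picked for this hctx.
 */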
static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
						  struct scsi_cmnd *sc)
{
	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
	u16 hwq = blk_mq_unique_tag_to_hwq(tag);

	return &vscsi->req_vqs[hwq];
}

static int virtscsi_queuecommand(struct Scsi_Host *shost,
				 struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
	bool kick;
	unsigned long flags;
	int req_size;
	int ret;

	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

	/* TODO: check feature bit and fail if unsupported?  */
	BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

	cmd->sc = sc;

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
		virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
		memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd_pi);
	} else
#endif
	{
		virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
		memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd);
	}

	kick = (sc->flags & SCMD_LAST) != 0;
	ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick);
	if (ret == -EIO) {
		cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
		spin_lock_irqsave(&req_vq->vq_lock, flags);
		virtscsi_complete_cmd(vscsi, cmd);
		spin_unlock_irqrestore(&req_vq->vq_lock, flags);
	} else if (ret != 0) {
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	return 0;
}

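/*
 * Task management functions travel on the control virtqueue.  The
 * caller sleeps on cmd->comp, which virtscsi_complete_free() signals
 * when the device posts the TMF response.
 */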
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret = FAILED;

	cmd->comp = &comp;
	if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd,
			      sizeof(cmd->req.tmf), sizeof(cmd->resp.tmf),
			      true) < 0)
		goto out;

	wait_for_completion(&comp);
	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
		ret = SUCCESS;

	/*
	 * The spec guarantees that all requests related to the TMF have
	 * been completed, but the callback might not have run yet if
	 * we're using independent interrupts (e.g. MSI).  Poll the
	 * virtqueues once.
	 *
	 * In the abort case, scsi_done() will do nothing, because the
	 * command timed out and hence SCMD_STATE_COMPLETE has been set.
	 */
	virtscsi_poll_requests(vscsi);

out:
	mempool_free(cmd, virtscsi_cmd_pool);
	return ret;
}

static int virtscsi_device_reset(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	sdev_printk(KERN_INFO, sc->device, "device reset\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = cpu_to_virtio32(vscsi->vdev,
					     VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET),
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
	};
	return virtscsi_tmf(vscsi, cmd);
}

static int virtscsi_device_alloc(struct scsi_device *sdevice)
{
	/*
	 * Passed through SCSI targets (e.g. with qemu's 'scsi-block')
	 * may have transfer limits which come from the host SCSI
	 * controller or something on the host side other than the
	 * target itself.
	 *
	 * To make this work properly, the hypervisor can adjust the
	 * target's VPD information to advertise these limits.  But
	 * for that to work, the guest has to look at the VPD pages,
	 * which we won't do by default if it is an SPC-2 device, even
	 * if it does actually support it.
	 *
	 * So, set the blist to always try to read the VPD pages.
	 */
	sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;

	return 0;
}

/**
 * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
 * @sdev:	Virtscsi target whose queue depth to change
 * @qdepth:	New queue depth
 */
static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth = shost->cmd_per_lun;

	return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
}

static int virtscsi_abort(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	scmd_printk(KERN_INFO, sc, "abort\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc),
	};
	return virtscsi_tmf(vscsi, cmd);
}

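/*
 * Tell blk-mq how hardware contexts map onto the request virtqueues.
 * blk_mq_virtio_map_queues() follows the device's MSI-X affinity; the
 * VIRTIO_SCSI_VQ_BASE offset skips the control and event virtqueues,
 * which occupy the first two vectors.
 */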
static void virtscsi_map_queues(struct Scsi_Host *shost)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	int i, qoff;

	for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = vscsi->io_queues[i];
		map->queue_offset = qoff;
		qoff += map->nr_queues;

		if (map->nr_queues == 0)
			continue;

		/*
		 * Regular queues have interrupts and hence CPU affinity is
		 * defined by the core virtio code, but polling queues have
		 * no interrupts so we let the block layer assign CPU affinity.
		 */
		if (i == HCTX_TYPE_POLL)
			blk_mq_map_queues(map);
		else
			blk_mq_virtio_map_queues(map, vscsi->vdev,
						 VIRTIO_SCSI_VQ_BASE);
	}
}

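/*
 * Polled completion path: blk-mq calls this for poll queues to reap
 * finished commands directly from the virtqueue, without relying on an
 * interrupt; the return value is the number of completions found.
 */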
static int virtscsi_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	struct virtio_scsi_vq *virtscsi_vq = &vscsi->req_vqs[queue_num];
	unsigned long flags;
	unsigned int len;
	int found = 0;
	void *buf;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);

	while ((buf = virtqueue_get_buf(virtscsi_vq->vq, &len)) != NULL) {
		virtscsi_complete_cmd(vscsi, buf);
		found++;
	}

	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);

	return found;
}

static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
{
	struct virtio_scsi *vscsi = shost_priv(shost);

	virtscsi_kick_vq(&vscsi->req_vqs[hwq]);
}

/*
 * The host guarantees to respond to each command, although I/O
 * latencies might be higher than on bare metal.  Reset the timer
 * unconditionally to give the host a chance to perform EH.
 */
static enum scsi_timeout_action virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
{
	return SCSI_EH_RESET_TIMER;
}

static const struct scsi_host_template virtscsi_host_template = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.this_id = -1,
	.cmd_size = sizeof(struct virtio_scsi_cmd),
	.queuecommand = virtscsi_queuecommand,
	.mq_poll = virtscsi_mq_poll,
	.commit_rqs = virtscsi_commit_rqs,
	.change_queue_depth = virtscsi_change_queue_depth,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,
	.eh_timed_out = virtscsi_eh_timed_out,
	.slave_alloc = virtscsi_device_alloc,

	.dma_boundary = UINT_MAX,
	.map_queues = virtscsi_map_queues,
	.track_queue_depth = 1,
};

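/*
 * Accessors for struct virtio_scsi_config.  They wrap virtio_cread()
 * and virtio_cwrite() so each field is read or written with its native
 * width and endianness, e.g. virtscsi_config_get(vdev, num_queues)
 * yields the device's queue count as a plain u32.
 */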
#define virtscsi_config_get(vdev, fld) \
	({ \
		__virtio_native_type(struct virtio_scsi_config, fld) __val; \
		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
		__val; \
	})

#define virtscsi_config_set(vdev, fld, val) \
	do { \
		__virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
	} while (0)

static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
			     struct virtqueue *vq)
{
	spin_lock_init(&virtscsi_vq->vq_lock);
	virtscsi_vq->vq = vq;
}

static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
	/* Stop all the virtqueues. */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
}

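/*
 * Virtqueue layout is fixed by the virtio-scsi spec: index 0 is the
 * control queue, index 1 the event queue, and everything from
 * VIRTIO_SCSI_VQ_BASE up is a request queue.  The two pre_vectors keep
 * the control/event interrupts out of the automatic affinity spreading
 * applied to the request queues.
 */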
static int virtscsi_init(struct virtio_device *vdev,
			 struct virtio_scsi *vscsi)
{
	int err;
	u32 i;
	u32 num_vqs, num_poll_vqs, num_req_vqs;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	struct irq_affinity desc = { .pre_vectors = 2 };

	num_req_vqs = vscsi->num_queues;
	num_vqs = num_req_vqs + VIRTIO_SCSI_VQ_BASE;
	vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(vq_callback_t *),
				  GFP_KERNEL);
	names = kmalloc_array(num_vqs, sizeof(char *), GFP_KERNEL);

	if (!callbacks || !vqs || !names) {
		err = -ENOMEM;
		goto out;
	}

	num_poll_vqs = min_t(unsigned int, virtscsi_poll_queues,
			     num_req_vqs - 1);
	vscsi->io_queues[HCTX_TYPE_DEFAULT] = num_req_vqs - num_poll_vqs;
	vscsi->io_queues[HCTX_TYPE_READ] = 0;
	vscsi->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;

	dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
		 vscsi->io_queues[HCTX_TYPE_DEFAULT],
		 vscsi->io_queues[HCTX_TYPE_READ],
		 vscsi->io_queues[HCTX_TYPE_POLL]);

	callbacks[0] = virtscsi_ctrl_done;
	callbacks[1] = virtscsi_event_done;
	names[0] = "control";
	names[1] = "event";
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs - num_poll_vqs; i++) {
		callbacks[i] = virtscsi_req_done;
		names[i] = "request";
	}

	for (; i < num_vqs; i++) {
		callbacks[i] = NULL;
		names[i] = "request_poll";
	}

	/* Discover virtqueues and write information to configuration.  */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
		virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
				 vqs[i]);

	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);

	err = 0;

out:
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	if (err)
		virtscsi_remove_vqs(vdev);
	return err;
}

static int virtscsi_probe(struct virtio_device *vdev)
{
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	int err;
	u32 sg_elems, num_targets;
	u32 cmd_per_lun;
	u32 num_queues;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	/* We need to know how many queues before we allocate. */
	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
	num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);

	num_targets = virtscsi_config_get(vdev, max_target) + 1;

	shost = scsi_host_alloc(&virtscsi_host_template,
				struct_size(vscsi, req_vqs, num_queues));
	if (!shost)
		return -ENOMEM;

	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
	shost->sg_tablesize = sg_elems;
	shost->nr_maps = 1;
	vscsi = shost_priv(shost);
	vscsi->vdev = vdev;
	vscsi->num_queues = num_queues;
	vdev->priv = shost;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		goto virtscsi_init_failed;

	if (vscsi->io_queues[HCTX_TYPE_POLL])
		shost->nr_maps = HCTX_TYPE_POLL + 1;

	shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);

	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;

	/*
	 * LUNs > 256 are reported with format 1, so they go in the range
	 * 16640-32767.
	 */
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
	shost->max_id = num_targets;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	shost->nr_hw_queues = num_queues;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
		int host_prot;

		host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
			    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
			    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

		scsi_host_set_prot(shost, host_prot);
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
	}
#endif

	err = scsi_add_host(shost, &vdev->dev);
	if (err)
		goto scsi_add_host_failed;

	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	scsi_scan_host(shost);
	return 0;

scsi_add_host_failed:
	vdev->config->del_vqs(vdev);
virtscsi_init_failed:
	scsi_host_put(shost);
	return err;
}

static void virtscsi_remove(struct virtio_device *vdev)
{
	struct Scsi_Host *shost = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(shost);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_cancel_event_work(vscsi);

	scsi_remove_host(shost);
	virtscsi_remove_vqs(vdev);
	scsi_host_put(shost);
}

#ifdef CONFIG_PM_SLEEP
static int virtscsi_freeze(struct virtio_device *vdev)
{
	virtscsi_remove_vqs(vdev);
	return 0;
}

static int virtscsi_restore(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int err;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		return err;

	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	return err;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_SCSI_F_HOTPLUG,
	VIRTIO_SCSI_F_CHANGE,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	VIRTIO_SCSI_F_T10_PI,
#endif
};

static struct virtio_driver virtio_scsi_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtscsi_probe,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
#endif
	.remove = virtscsi_remove,
};

static int __init virtio_scsi_init(void)
{
	int ret = -ENOMEM;

	virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
	if (!virtscsi_cmd_cache) {
		pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
		goto error;
	}

	virtscsi_cmd_pool =
		mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
					 virtscsi_cmd_cache);
	if (!virtscsi_cmd_pool) {
		pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
		goto error;
	}
	ret = register_virtio_driver(&virtio_scsi_driver);
	if (ret < 0)
		goto error;

	return 0;

error:
	mempool_destroy(virtscsi_cmd_pool);
	virtscsi_cmd_pool = NULL;
	kmem_cache_destroy(virtscsi_cmd_cache);
	virtscsi_cmd_cache = NULL;
	return ret;
}

static void __exit virtio_scsi_fini(void)
{
	unregister_virtio_driver(&virtio_scsi_driver);
	mempool_destroy(virtscsi_cmd_pool);
	kmem_cache_destroy(virtscsi_cmd_cache);
}
module_init(virtio_scsi_init);
module_exit(virtio_scsi_fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");