1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*******************************************************************************
3  * Filename:  target_core_pscsi.c
4  *
5  * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
6  *
7  * (c) Copyright 2003-2013 Datera, Inc.
8  *
9  * Nicholas A. Bellinger <nab@kernel.org>
10  *
11  ******************************************************************************/
12 
13 #include <linux/string.h>
14 #include <linux/parser.h>
15 #include <linux/timer.h>
16 #include <linux/blkdev.h>
17 #include <linux/blk_types.h>
18 #include <linux/slab.h>
19 #include <linux/spinlock.h>
20 #include <linux/cdrom.h>
21 #include <linux/ratelimit.h>
22 #include <linux/module.h>
23 #include <asm/unaligned.h>
24 
25 #include <scsi/scsi_device.h>
26 #include <scsi/scsi_host.h>
27 #include <scsi/scsi_tcq.h>
28 
29 #include <target/target_core_base.h>
30 #include <target/target_core_backend.h>
31 
32 #include "target_core_alua.h"
33 #include "target_core_internal.h"
34 #include "target_core_pscsi.h"
35 
36 static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
37 {
38 	return container_of(dev, struct pscsi_dev_virt, dev);
39 }
40 
41 static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
42 static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t);
43 
44 /*	pscsi_attach_hba():
45  *
46  *	Records the passed SCSI Host ID; scsi_host_lookup() is used later to
47  *	locate the corresponding struct Scsi_Host.
48  */
49 static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
50 {
51 	struct pscsi_hba_virt *phv;
52 
53 	phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
54 	if (!phv) {
55 		pr_err("Unable to allocate struct pscsi_hba_virt\n");
56 		return -ENOMEM;
57 	}
58 	phv->phv_host_id = host_id;
59 	phv->phv_mode = PHV_VIRTUAL_HOST_ID;
60 
61 	hba->hba_ptr = phv;
62 
63 	pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
64 		" Generic Target Core Stack %s\n", hba->hba_id,
65 		PSCSI_VERSION, TARGET_CORE_VERSION);
66 	pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
67 	       hba->hba_id);
68 
69 	return 0;
70 }
71 
72 static void pscsi_detach_hba(struct se_hba *hba)
73 {
74 	struct pscsi_hba_virt *phv = hba->hba_ptr;
75 	struct Scsi_Host *scsi_host = phv->phv_lld_host;
76 
77 	if (scsi_host) {
78 		scsi_host_put(scsi_host);
79 
80 		pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from"
81 			" Generic Target Core\n", hba->hba_id,
82 			(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
83 			"Unknown");
84 	} else
85 		pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA"
86 			" from Generic Target Core\n", hba->hba_id);
87 
88 	kfree(phv);
89 	hba->hba_ptr = NULL;
90 }
91 
92 static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
93 {
94 	struct pscsi_hba_virt *phv = hba->hba_ptr;
95 	struct Scsi_Host *sh = phv->phv_lld_host;
96 	/*
97 	 * Release the struct Scsi_Host
98 	 */
99 	if (!mode_flag) {
100 		if (!sh)
101 			return 0;
102 
103 		phv->phv_lld_host = NULL;
104 		phv->phv_mode = PHV_VIRTUAL_HOST_ID;
105 
106 		pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
107 			" %s\n", hba->hba_id, (sh->hostt->name) ?
108 			(sh->hostt->name) : "Unknown");
109 
110 		scsi_host_put(sh);
111 		return 0;
112 	}
113 	/*
114 	 * Otherwise, locate the struct Scsi_Host from the originally passed
115 	 * pSCSI Host ID and enable HBA passthrough mode.
116 	 */
117 	sh = scsi_host_lookup(phv->phv_host_id);
118 	if (!sh) {
119 		pr_err("pSCSI: Unable to locate SCSI Host for"
120 			" phv_host_id: %d\n", phv->phv_host_id);
121 		return -EINVAL;
122 	}
123 
124 	phv->phv_lld_host = sh;
125 	phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
126 
127 	pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
128 		hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
129 
130 	return 1;
131 }
132 
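/*
 * Issue a 6-byte MODE SENSE and read the block length from the returned
 * block descriptor into sdev->sector_size.  If the command fails or the
 * device reports a length of zero, fall back to a 1024-byte block size.
 */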
133 static void pscsi_tape_read_blocksize(struct se_device *dev,
134 		struct scsi_device *sdev)
135 {
136 	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
137 	int ret;
138 
139 	buf = kzalloc(12, GFP_KERNEL);
140 	if (!buf)
141 		goto out_free;
142 
143 	memset(cdb, 0, MAX_COMMAND_SIZE);
144 	cdb[0] = MODE_SENSE;
145 	cdb[4] = 0x0c; /* 12 bytes */
146 
147 	ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf, 12, HZ, 1, NULL);
148 	if (ret)
149 		goto out_free;
150 
151 	/*
152 	 * Read the block length from the block descriptor; zero falls back to 1024 below.
153 	 */
154 	sdev->sector_size = get_unaligned_be24(&buf[9]);
155 out_free:
156 	if (!sdev->sector_size)
157 		sdev->sector_size = 1024;
158 
159 	kfree(buf);
160 }
161 
162 static void
163 pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
164 {
165 	if (sdev->inquiry_len < INQUIRY_LEN)
166 		return;
167 	/*
168 	 * Use sdev->inquiry data from drivers/scsi/scsi_scan.c:scsi_add_lun()
169 	 */
170 	BUILD_BUG_ON(sizeof(wwn->vendor) != INQUIRY_VENDOR_LEN + 1);
171 	snprintf(wwn->vendor, sizeof(wwn->vendor),
172 		 "%." __stringify(INQUIRY_VENDOR_LEN) "s", sdev->vendor);
173 	BUILD_BUG_ON(sizeof(wwn->model) != INQUIRY_MODEL_LEN + 1);
174 	snprintf(wwn->model, sizeof(wwn->model),
175 		 "%." __stringify(INQUIRY_MODEL_LEN) "s", sdev->model);
176 	BUILD_BUG_ON(sizeof(wwn->revision) != INQUIRY_REVISION_LEN + 1);
177 	snprintf(wwn->revision, sizeof(wwn->revision),
178 		 "%." __stringify(INQUIRY_REVISION_LEN) "s", sdev->rev);
179 }
180 
181 static int
182 pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
183 {
184 	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
185 	int ret;
186 
187 	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
188 	if (!buf)
189 		return -ENOMEM;
190 
191 	memset(cdb, 0, MAX_COMMAND_SIZE);
192 	cdb[0] = INQUIRY;
193 	cdb[1] = 0x01; /* Query VPD */
194 	cdb[2] = 0x80; /* Unit Serial Number */
195 	put_unaligned_be16(INQUIRY_VPD_SERIAL_LEN, &cdb[3]);
196 
197 	ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf,
198 			       INQUIRY_VPD_SERIAL_LEN, HZ, 1, NULL);
199 	if (ret)
200 		goto out_free;
201 
202 	snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
203 
204 	wwn->t10_dev->dev_flags |= DF_FIRMWARE_VPD_UNIT_SERIAL;
205 
206 	kfree(buf);
207 	return 0;
208 
209 out_free:
210 	kfree(buf);
211 	return -EPERM;
212 }
213 
214 static void
215 pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
216 		struct t10_wwn *wwn)
217 {
218 	unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
219 	int ident_len, page_len, off = 4, ret;
220 	struct t10_vpd *vpd;
221 
222 	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
223 	if (!buf)
224 		return;
225 
226 	memset(cdb, 0, MAX_COMMAND_SIZE);
227 	cdb[0] = INQUIRY;
228 	cdb[1] = 0x01; /* Query VPD */
229 	cdb[2] = 0x83; /* Device Identifier */
230 	put_unaligned_be16(INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, &cdb[3]);
231 
232 	ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf,
233 			       INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, HZ, 1, NULL);
234 	if (ret)
235 		goto out;
236 
237 	page_len = get_unaligned_be16(&buf[2]);
238 	while (page_len > 0) {
239 		/* Grab a pointer to the Identification descriptor */
240 		page_83 = &buf[off];
241 		ident_len = page_83[3];
242 		if (!ident_len) {
243 			pr_err("page_83[3]: identifier"
244 					" length zero!\n");
245 			break;
246 		}
247 		pr_debug("T10 VPD Identifier Length: %d\n", ident_len);
248 
249 		vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
250 		if (!vpd) {
251 			pr_err("Unable to allocate memory for"
252 					" struct t10_vpd\n");
253 			goto out;
254 		}
255 		INIT_LIST_HEAD(&vpd->vpd_list);
256 
257 		transport_set_vpd_proto_id(vpd, page_83);
258 		transport_set_vpd_assoc(vpd, page_83);
259 
260 		if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
261 			off += (ident_len + 4);
262 			page_len -= (ident_len + 4);
263 			kfree(vpd);
264 			continue;
265 		}
266 		if (transport_set_vpd_ident(vpd, page_83) < 0) {
267 			off += (ident_len + 4);
268 			page_len -= (ident_len + 4);
269 			kfree(vpd);
270 			continue;
271 		}
272 
273 		list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
274 		off += (ident_len + 4);
275 		page_len -= (ident_len + 4);
276 	}
277 
278 out:
279 	kfree(buf);
280 }
281 
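/*
 * Cache the struct scsi_device reference and derive the backend device
 * attributes (hw_block_size, hw_max_sectors, hw_queue_depth) from it,
 * then pull standard INQUIRY and VPD WWN information into dev->t10_wwn.
 */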
282 static int pscsi_add_device_to_list(struct se_device *dev,
283 		struct scsi_device *sd)
284 {
285 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
286 	struct request_queue *q = sd->request_queue;
287 
288 	pdv->pdv_sd = sd;
289 
290 	if (!sd->queue_depth) {
291 		sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
292 
293 		pr_err("Set broken SCSI Device %d:%d:%llu"
294 			" queue_depth to %d\n", sd->channel, sd->id,
295 				sd->lun, sd->queue_depth);
296 	}
297 
298 	dev->dev_attrib.hw_block_size =
299 		min_not_zero((int)sd->sector_size, 512);
300 	dev->dev_attrib.hw_max_sectors =
301 		min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
302 	dev->dev_attrib.hw_queue_depth = sd->queue_depth;
303 
304 	/*
305 	 * Setup our standard INQUIRY info into se_dev->t10_wwn
306 	 */
307 	pscsi_set_inquiry_info(sd, &dev->t10_wwn);
308 
309 	/*
310 	 * Locate VPD WWN Information used for various purposes within
311 	 * the Storage Engine.
312 	 */
313 	if (!pscsi_get_inquiry_vpd_serial(sd, &dev->t10_wwn)) {
314 		/*
315 		 * If VPD Unit Serial returned GOOD status, try
316 		 * VPD Device Identification page (0x83).
317 		 */
318 		pscsi_get_inquiry_vpd_device_ident(sd, &dev->t10_wwn);
319 	}
320 
321 	/*
322 	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
323 	 */
324 	if (sd->type == TYPE_TAPE) {
325 		pscsi_tape_read_blocksize(dev, sd);
326 		dev->dev_attrib.hw_block_size = sd->sector_size;
327 	}
328 	return 0;
329 }
330 
331 static struct se_device *pscsi_alloc_device(struct se_hba *hba,
332 		const char *name)
333 {
334 	struct pscsi_dev_virt *pdv;
335 
336 	pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
337 	if (!pdv) {
338 		pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
339 		return NULL;
340 	}
341 
342 	pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
343 	return &pdv->dev;
344 }
345 
346 /*
347  * Called with struct Scsi_Host->host_lock held.
348  */
349 static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
350 	__releases(sh->host_lock)
351 {
352 	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
353 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
354 	struct Scsi_Host *sh = sd->host;
355 	struct file *bdev_file;
356 	int ret;
357 
358 	if (scsi_device_get(sd)) {
359 		pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n",
360 			sh->host_no, sd->channel, sd->id, sd->lun);
361 		spin_unlock_irq(sh->host_lock);
362 		return -EIO;
363 	}
364 	spin_unlock_irq(sh->host_lock);
365 	/*
366 	 * Claim exclusive struct block_device access to struct scsi_device
367 	 * for TYPE_DISK and TYPE_ZBC using supplied udev_path
368 	 */
369 	bdev_file = bdev_file_open_by_path(dev->udev_path,
370 				BLK_OPEN_WRITE | BLK_OPEN_READ, pdv, NULL);
371 	if (IS_ERR(bdev_file)) {
372 		pr_err("pSCSI: bdev_file_open_by_path() failed\n");
373 		scsi_device_put(sd);
374 		return PTR_ERR(bdev_file);
375 	}
376 	pdv->pdv_bdev_file = bdev_file;
377 
378 	ret = pscsi_add_device_to_list(dev, sd);
379 	if (ret) {
380 		fput(bdev_file);
381 		scsi_device_put(sd);
382 		return ret;
383 	}
384 
385 	pr_debug("CORE_PSCSI[%d] - Added TYPE_%s for %d:%d:%d:%llu\n",
386 		phv->phv_host_id, sd->type == TYPE_DISK ? "DISK" : "ZBC",
387 		sh->host_no, sd->channel, sd->id, sd->lun);
388 	return 0;
389 }
390 
391 /*
392  * Called with struct Scsi_Host->host_lock held.
393  */
394 static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
395 	__releases(sh->host_lock)
396 {
397 	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
398 	struct Scsi_Host *sh = sd->host;
399 	int ret;
400 
401 	if (scsi_device_get(sd)) {
402 		pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n",
403 			sh->host_no, sd->channel, sd->id, sd->lun);
404 		spin_unlock_irq(sh->host_lock);
405 		return -EIO;
406 	}
407 	spin_unlock_irq(sh->host_lock);
408 
409 	ret = pscsi_add_device_to_list(dev, sd);
410 	if (ret) {
411 		scsi_device_put(sd);
412 		return ret;
413 	}
414 	pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
415 		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
416 		sd->channel, sd->id, sd->lun);
417 
418 	return 0;
419 }
420 
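/*
 * Locate the struct scsi_device described by the scsi_channel_id=,
 * scsi_target_id= and scsi_lun_id= parameters on the struct Scsi_Host
 * selected by the current HBA mode, and register it with target_core_mod
 * via the TYPE_DISK/TYPE_ZBC or non-disk helpers above.
 */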
421 static int pscsi_configure_device(struct se_device *dev)
422 {
423 	struct se_hba *hba = dev->se_hba;
424 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
425 	struct scsi_device *sd;
426 	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
427 	struct Scsi_Host *sh = phv->phv_lld_host;
428 	int legacy_mode_enable = 0;
429 	int ret;
430 
431 	if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
432 	    !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
433 	    !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
434 		pr_err("Missing scsi_channel_id=, scsi_target_id= and"
435 			" scsi_lun_id= parameters\n");
436 		return -EINVAL;
437 	}
438 
439 	/*
440 	 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
441 	 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
442 	 */
443 	if (!sh) {
444 		if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
445 			pr_err("pSCSI: Unable to locate struct"
446 				" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
447 			return -ENODEV;
448 		}
449 		/*
450 		 * For the newer PHV_VIRTUAL_HOST_ID struct scsi_device
451 		 * reference, we enforce that udev_path has been set
452 		 */
453 		if (!(dev->dev_flags & DF_USING_UDEV_PATH)) {
454 			pr_err("pSCSI: udev_path attribute has not"
455 				" been set before ENABLE=1\n");
456 			return -EINVAL;
457 		}
458 		/*
459 		 * If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID,
460 		 * use the original TCM hba ID to reference Linux/SCSI Host No
461 		 * and enable for PHV_LLD_SCSI_HOST_NO mode.
462 		 */
463 		if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
464 			if (hba->dev_count) {
465 				pr_err("pSCSI: Unable to set hba_mode"
466 					" with active devices\n");
467 				return -EEXIST;
468 			}
469 
470 			if (pscsi_pmode_enable_hba(hba, 1) != 1)
471 				return -ENODEV;
472 
473 			legacy_mode_enable = 1;
474 			hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
475 			sh = phv->phv_lld_host;
476 		} else {
477 			sh = scsi_host_lookup(pdv->pdv_host_id);
478 			if (!sh) {
479 				pr_err("pSCSI: Unable to locate"
480 					" pdv_host_id: %d\n", pdv->pdv_host_id);
481 				return -EINVAL;
482 			}
483 			pdv->pdv_lld_host = sh;
484 		}
485 	} else {
486 		if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
487 			pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while"
488 				" struct Scsi_Host exists\n");
489 			return -EEXIST;
490 		}
491 	}
492 
493 	spin_lock_irq(sh->host_lock);
494 	list_for_each_entry(sd, &sh->__devices, siblings) {
495 		if ((pdv->pdv_channel_id != sd->channel) ||
496 		    (pdv->pdv_target_id != sd->id) ||
497 		    (pdv->pdv_lun_id != sd->lun))
498 			continue;
499 		/*
500 		 * Both helpers release the held struct Scsi_Host->host_lock
501 		 * before calling pscsi_add_device_to_list() to register
502 		 * struct scsi_device with target_core_mod.
503 		 */
504 		switch (sd->type) {
505 		case TYPE_DISK:
506 		case TYPE_ZBC:
507 			ret = pscsi_create_type_disk(dev, sd);
508 			break;
509 		default:
510 			ret = pscsi_create_type_nondisk(dev, sd);
511 			break;
512 		}
513 
514 		if (ret) {
515 			if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
516 				scsi_host_put(sh);
517 			else if (legacy_mode_enable) {
518 				pscsi_pmode_enable_hba(hba, 0);
519 				hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
520 			}
521 			pdv->pdv_sd = NULL;
522 			return ret;
523 		}
524 		return 0;
525 	}
526 	spin_unlock_irq(sh->host_lock);
527 
528 	pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
529 		pdv->pdv_channel_id,  pdv->pdv_target_id, pdv->pdv_lun_id);
530 
531 	if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
532 		scsi_host_put(sh);
533 	else if (legacy_mode_enable) {
534 		pscsi_pmode_enable_hba(hba, 0);
535 		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
536 	}
537 
538 	return -ENODEV;
539 }
540 
541 static void pscsi_dev_call_rcu(struct rcu_head *p)
542 {
543 	struct se_device *dev = container_of(p, struct se_device, rcu_head);
544 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
545 
546 	kfree(pdv);
547 }
548 
549 static void pscsi_free_device(struct se_device *dev)
550 {
551 	call_rcu(&dev->rcu_head, pscsi_dev_call_rcu);
552 }
553 
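/*
 * Undo pscsi_configure_device(): drop the exclusive block_device claim
 * held for TYPE_DISK/TYPE_ZBC, release any struct Scsi_Host reference
 * taken at configure time, and put the struct scsi_device.
 */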
554 static void pscsi_destroy_device(struct se_device *dev)
555 {
556 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
557 	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
558 	struct scsi_device *sd = pdv->pdv_sd;
559 
560 	if (sd) {
561 		/*
562 		 * Release exclusive pSCSI internal struct block_device claim for
563 		 * struct scsi_device with TYPE_DISK or TYPE_ZBC
564 		 * from pscsi_create_type_disk()
565 		 */
566 		if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) &&
567 		    pdv->pdv_bdev_file) {
568 			fput(pdv->pdv_bdev_file);
569 			pdv->pdv_bdev_file = NULL;
570 		}
571 		/*
572 		 * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
573 		 * to struct Scsi_Host now.
574 		 */
575 		if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
576 		    (phv->phv_lld_host != NULL))
577 			scsi_host_put(phv->phv_lld_host);
578 		else if (pdv->pdv_lld_host)
579 			scsi_host_put(pdv->pdv_lld_host);
580 
581 		scsi_device_put(sd);
582 
583 		pdv->pdv_sd = NULL;
584 	}
585 }
586 
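/*
 * Post-process a completed passthrough command: force the Write-Protect
 * bit in MODE SENSE data for read-only LUNs, pick up the block size from
 * a successful MODE SELECT on TYPE_TAPE devices, and copy sense data on
 * CHECK CONDITION (treating tape FM/EOM/ILI reads as normal reads).
 */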
587 static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
588 			       unsigned char *req_sense, int valid_data)
589 {
590 	struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
591 	struct scsi_device *sd = pdv->pdv_sd;
592 	unsigned char *cdb = cmd->priv;
593 
594 	/*
595 	 * Special case for REPORT_LUNs which is emulated and not passed on.
596 	 */
597 	if (!cdb)
598 		return;
599 
600 	/*
601 	 * Hack to make sure the Write-Protect bit is set in the returned
602 	 * MODE SENSE data if R/O mode is forced.
603 	 */
604 	if (!cmd->data_length)
605 		goto after_mode_sense;
606 
607 	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
608 	    scsi_status == SAM_STAT_GOOD) {
609 		bool read_only = target_lun_is_rdonly(cmd);
610 
611 		if (read_only) {
612 			unsigned char *buf;
613 
614 			buf = transport_kmap_data_sg(cmd);
615 			if (!buf) {
616 				; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
617 			} else {
618 				if (cdb[0] == MODE_SENSE_10) {
619 					if (!(buf[3] & 0x80))
620 						buf[3] |= 0x80;
621 				} else {
622 					if (!(buf[2] & 0x80))
623 						buf[2] |= 0x80;
624 				}
625 
626 				transport_kunmap_data_sg(cmd);
627 			}
628 		}
629 	}
630 after_mode_sense:
631 
632 	if (sd->type != TYPE_TAPE || !cmd->data_length)
633 		goto after_mode_select;
634 
635 	/*
636 	 * Hack to obtain the initiator-requested blocksize for TYPE_TAPE.
637 	 * Since this value depends on the loaded tape media,
638 	 * struct scsi_device->sector_size will not contain the correct value
639 	 * by default, so set it here from the MODE SELECT block descriptor
640 	 * so that the correct block size is reported back to the
641 	 * storage engine.
642 	 */
643 	if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
644 	     scsi_status == SAM_STAT_GOOD) {
645 		unsigned char *buf;
646 		u16 bdl;
647 		u32 blocksize;
648 
649 		buf = sg_virt(&cmd->t_data_sg[0]);
650 		if (!buf) {
651 			pr_err("Unable to get buf for scatterlist\n");
652 			goto after_mode_select;
653 		}
654 
655 		if (cdb[0] == MODE_SELECT)
656 			bdl = buf[3];
657 		else
658 			bdl = get_unaligned_be16(&buf[6]);
659 
660 		if (!bdl)
661 			goto after_mode_select;
662 
663 		if (cdb[0] == MODE_SELECT)
664 			blocksize = get_unaligned_be24(&buf[9]);
665 		else
666 			blocksize = get_unaligned_be24(&buf[13]);
667 
668 		sd->sector_size = blocksize;
669 	}
670 after_mode_select:
671 
672 	if (scsi_status == SAM_STAT_CHECK_CONDITION) {
673 		transport_copy_sense_to_cmd(cmd, req_sense);
674 
675 		/*
676 		 * check for TAPE device reads with
677 		 * FM/EOM/ILI set, so that we can get data
678 		 * back despite framework assumption that a
679 		 * check condition means there is no data
680 		 */
681 		if (sd->type == TYPE_TAPE && valid_data &&
682 		    cmd->data_direction == DMA_FROM_DEVICE) {
683 			/*
684 			 * Is the sense data valid and in fixed format,
685 			 * with FM, EOM, or ILI set?
686 			 */
687 			if (req_sense[0] == 0xf0 &&	/* valid, fixed format */
688 			    req_sense[2] & 0xe0 &&	/* FM, EOM, or ILI */
689 			    (req_sense[2] & 0xf) == 0) { /* key==NO_SENSE */
690 				pr_debug("Tape FM/EOM/ILI status detected. Treat as normal read.\n");
691 				cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
692 			}
693 		}
694 	}
695 }
696 
697 enum {
698 	Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
699 	Opt_scsi_lun_id, Opt_err
700 };
701 
702 static match_table_t tokens = {
703 	{Opt_scsi_host_id, "scsi_host_id=%d"},
704 	{Opt_scsi_channel_id, "scsi_channel_id=%d"},
705 	{Opt_scsi_target_id, "scsi_target_id=%d"},
706 	{Opt_scsi_lun_id, "scsi_lun_id=%d"},
707 	{Opt_err, NULL}
708 };
709 
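/*
 * Parse the pSCSI configfs control string, e.g. (values are illustrative):
 *
 *   scsi_host_id=2,scsi_channel_id=0,scsi_target_id=1,scsi_lun_id=0
 *
 * scsi_host_id= is only accepted while in PHV_VIRTUAL_HOST_ID mode; the
 * channel, target and LUN IDs must all be set before the device can be
 * configured.
 */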
710 static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
711 		const char *page, ssize_t count)
712 {
713 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
714 	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
715 	char *orig, *ptr, *opts;
716 	substring_t args[MAX_OPT_ARGS];
717 	int ret = 0, arg, token;
718 
719 	opts = kstrdup(page, GFP_KERNEL);
720 	if (!opts)
721 		return -ENOMEM;
722 
723 	orig = opts;
724 
725 	while ((ptr = strsep(&opts, ",\n")) != NULL) {
726 		if (!*ptr)
727 			continue;
728 
729 		token = match_token(ptr, tokens, args);
730 		switch (token) {
731 		case Opt_scsi_host_id:
732 			if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
733 				pr_err("PSCSI[%d]: Unable to accept"
734 					" scsi_host_id while phv_mode =="
735 					" PHV_LLD_SCSI_HOST_NO\n",
736 					phv->phv_host_id);
737 				ret = -EINVAL;
738 				goto out;
739 			}
740 			ret = match_int(args, &arg);
741 			if (ret)
742 				goto out;
743 			pdv->pdv_host_id = arg;
744 			pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
745 				" %d\n", phv->phv_host_id, pdv->pdv_host_id);
746 			pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
747 			break;
748 		case Opt_scsi_channel_id:
749 			ret = match_int(args, &arg);
750 			if (ret)
751 				goto out;
752 			pdv->pdv_channel_id = arg;
753 			pr_debug("PSCSI[%d]: Referencing SCSI Channel"
754 				" ID: %d\n",  phv->phv_host_id,
755 				pdv->pdv_channel_id);
756 			pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
757 			break;
758 		case Opt_scsi_target_id:
759 			ret = match_int(args, &arg);
760 			if (ret)
761 				goto out;
762 			pdv->pdv_target_id = arg;
763 			pr_debug("PSCSI[%d]: Referencing SCSI Target"
764 				" ID: %d\n", phv->phv_host_id,
765 				pdv->pdv_target_id);
766 			pdv->pdv_flags |= PDF_HAS_TARGET_ID;
767 			break;
768 		case Opt_scsi_lun_id:
769 			ret = match_int(args, &arg);
770 			if (ret)
771 				goto out;
772 			pdv->pdv_lun_id = arg;
773 			pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
774 				" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
775 			pdv->pdv_flags |= PDF_HAS_LUN_ID;
776 			break;
777 		default:
778 			break;
779 		}
780 	}
781 
782 out:
783 	kfree(orig);
784 	return (!ret) ? count : ret;
785 }
786 
787 static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
788 {
789 	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
790 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
791 	struct scsi_device *sd = pdv->pdv_sd;
792 	unsigned char host_id[16];
793 	ssize_t bl;
794 
795 	if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
796 		snprintf(host_id, 16, "%d", pdv->pdv_host_id);
797 	else
798 		snprintf(host_id, 16, "PHBA Mode");
799 
800 	bl = sprintf(b, "SCSI Device Bus Location:"
801 		" Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
802 		pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
803 		host_id);
804 
805 	if (sd) {
806 		bl += sprintf(b + bl, "        Vendor: %."
807 			__stringify(INQUIRY_VENDOR_LEN) "s", sd->vendor);
808 		bl += sprintf(b + bl, " Model: %."
809 			__stringify(INQUIRY_MODEL_LEN) "s", sd->model);
810 		bl += sprintf(b + bl, " Rev: %."
811 			__stringify(INQUIRY_REVISION_LEN) "s\n", sd->rev);
812 	}
813 	return bl;
814 }
815 
816 static void pscsi_bi_endio(struct bio *bio)
817 {
818 	bio_uninit(bio);
819 	kfree(bio);
820 }
821 
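/*
 * Map the command's data scatterlist into one or more bios and append
 * them to the passthrough request.  A fresh bio is allocated whenever the
 * current one cannot accept another page; on failure every partially
 * built bio is freed again.
 */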
822 static sense_reason_t
823 pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
824 		struct request *req)
825 {
826 	struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
827 	struct bio *bio = NULL;
828 	struct page *page;
829 	struct scatterlist *sg;
830 	u32 data_len = cmd->data_length, i, len, bytes, off;
831 	int nr_pages = (cmd->data_length + sgl[0].offset +
832 			PAGE_SIZE - 1) >> PAGE_SHIFT;
833 	int nr_vecs = 0, rc;
834 	int rw = (cmd->data_direction == DMA_TO_DEVICE);
835 
836 	BUG_ON(!cmd->data_length);
837 
838 	pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
839 
840 	for_each_sg(sgl, sg, sgl_nents, i) {
841 		page = sg_page(sg);
842 		off = sg->offset;
843 		len = sg->length;
844 
845 		pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
846 			page, len, off);
847 
848 		/*
849 		 * Each sg element contains at most one page of data, so it
850 		 * cannot cross a page boundary.
851 		 */
852 		if (off + len > PAGE_SIZE)
853 			goto fail;
854 
855 		if (len > 0 && data_len > 0) {
856 			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
857 			bytes = min(bytes, data_len);
858 
859 			if (!bio) {
860 new_bio:
861 				nr_vecs = bio_max_segs(nr_pages);
862 				bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
863 				if (!bio)
864 					goto fail;
865 				bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs,
866 					 rw ? REQ_OP_WRITE : REQ_OP_READ);
867 				bio->bi_end_io = pscsi_bi_endio;
868 
869 				pr_debug("PSCSI: Allocated bio: %p,"
870 					" dir: %s nr_vecs: %d\n", bio,
871 					(rw) ? "rw" : "r", nr_vecs);
872 			}
873 
874 			pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
875 				" bio: %p page: %p len: %d off: %d\n", i, bio,
876 				page, len, off);
877 
878 			rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
879 					bio, page, bytes, off);
880 			pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
881 				bio_segments(bio), nr_vecs);
882 			if (rc != bytes) {
883 				pr_debug("PSCSI: Reached bio->bi_vcnt max:"
884 					" %d i: %d bio: %p, allocating another"
885 					" bio\n", bio->bi_vcnt, i, bio);
886 
887 				rc = blk_rq_append_bio(req, bio);
888 				if (rc) {
889 					pr_err("pSCSI: failed to append bio\n");
890 					goto fail;
891 				}
892 
893 				goto new_bio;
894 			}
895 
896 			data_len -= bytes;
897 		}
898 	}
899 
900 	if (bio) {
901 		rc = blk_rq_append_bio(req, bio);
902 		if (rc) {
903 			pr_err("pSCSI: failed to append bio\n");
904 			goto fail;
905 		}
906 	}
907 
908 	return 0;
909 fail:
910 	if (bio) {
911 		bio_uninit(bio);
912 		kfree(bio);
913 	}
914 	while (req->bio) {
915 		bio = req->bio;
916 		req->bio = bio->bi_next;
917 		bio_uninit(bio);
918 		kfree(bio);
919 	}
920 	req->biotail = NULL;
921 	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
922 }
923 
924 static sense_reason_t
925 pscsi_parse_cdb(struct se_cmd *cmd)
926 {
927 	if (cmd->se_cmd_flags & SCF_BIDI)
928 		return TCM_UNSUPPORTED_SCSI_OPCODE;
929 
930 	return passthrough_parse_cdb(cmd, pscsi_execute_cmd);
931 }
932 
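/*
 * Build a SCSI passthrough request carrying the command's CDB, map its
 * data scatterlist, and dispatch it asynchronously; pscsi_req_done()
 * completes the se_cmd once the request finishes.
 */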
933 static sense_reason_t
934 pscsi_execute_cmd(struct se_cmd *cmd)
935 {
936 	struct scatterlist *sgl = cmd->t_data_sg;
937 	u32 sgl_nents = cmd->t_data_nents;
938 	struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
939 	struct scsi_cmnd *scmd;
940 	struct request *req;
941 	sense_reason_t ret;
942 
943 	req = scsi_alloc_request(pdv->pdv_sd->request_queue,
944 			cmd->data_direction == DMA_TO_DEVICE ?
945 			REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
946 	if (IS_ERR(req))
947 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
948 
949 	if (sgl) {
950 		ret = pscsi_map_sg(cmd, sgl, sgl_nents, req);
951 		if (ret)
952 			goto fail_put_request;
953 	}
954 
955 	req->end_io = pscsi_req_done;
956 	req->end_io_data = cmd;
957 
958 	scmd = blk_mq_rq_to_pdu(req);
959 	scmd->cmd_len = scsi_command_size(cmd->t_task_cdb);
960 	if (scmd->cmd_len > sizeof(scmd->cmnd)) {
961 		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
962 		goto fail_put_request;
963 	}
964 	memcpy(scmd->cmnd, cmd->t_task_cdb, scmd->cmd_len);
965 
966 	if (pdv->pdv_sd->type == TYPE_DISK ||
967 	    pdv->pdv_sd->type == TYPE_ZBC)
968 		req->timeout = PS_TIMEOUT_DISK;
969 	else
970 		req->timeout = PS_TIMEOUT_OTHER;
971 	scmd->allowed = PS_RETRY;
972 
973 	cmd->priv = scmd->cmnd;
974 
975 	blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG);
976 
977 	return 0;
978 
979 fail_put_request:
980 	blk_mq_free_request(req);
981 	return ret;
982 }
983 
984 /*	pscsi_get_device_type():
985  *	Returns the peripheral device type of the attached scsi_device,
986  *	or TYPE_NO_LUN if no device is attached.
987  */
988 static u32 pscsi_get_device_type(struct se_device *dev)
989 {
990 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
991 	struct scsi_device *sd = pdv->pdv_sd;
992 
993 	return (sd) ? sd->type : TYPE_NO_LUN;
994 }
995 
996 static sector_t pscsi_get_blocks(struct se_device *dev)
997 {
998 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
999 
1000 	if (pdv->pdv_bdev_file)
1001 		return bdev_nr_sectors(file_bdev(pdv->pdv_bdev_file));
1002 	return 0;
1003 }
1004 
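/*
 * Request end_io callback: hand the SCSI status, sense data and actual
 * transfer length back to the target core, mapping any non-DID_OK host
 * byte to CHECK CONDITION.
 */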
1005 static enum rq_end_io_ret pscsi_req_done(struct request *req,
1006 					 blk_status_t status)
1007 {
1008 	struct se_cmd *cmd = req->end_io_data;
1009 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
1010 	enum sam_status scsi_status = scmd->result & 0xff;
1011 	int valid_data = cmd->data_length - scmd->resid_len;
1012 	u8 *cdb = cmd->priv;
1013 
1014 	if (scsi_status != SAM_STAT_GOOD) {
1015 		pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
1016 			" 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result);
1017 	}
1018 
1019 	pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer, valid_data);
1020 
1021 	switch (host_byte(scmd->result)) {
1022 	case DID_OK:
1023 		target_complete_cmd_with_length(cmd, scsi_status, valid_data);
1024 		break;
1025 	default:
1026 		pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
1027 			" 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result);
1028 		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
1029 		break;
1030 	}
1031 
1032 	blk_mq_free_request(req);
1033 	return RQ_END_IO_NONE;
1034 }
1035 
1036 static const struct target_backend_ops pscsi_ops = {
1037 	.name			= "pscsi",
1038 	.owner			= THIS_MODULE,
1039 	.transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH |
1040 				   TRANSPORT_FLAG_PASSTHROUGH_ALUA |
1041 				   TRANSPORT_FLAG_PASSTHROUGH_PGR,
1042 	.attach_hba		= pscsi_attach_hba,
1043 	.detach_hba		= pscsi_detach_hba,
1044 	.pmode_enable_hba	= pscsi_pmode_enable_hba,
1045 	.alloc_device		= pscsi_alloc_device,
1046 	.configure_device	= pscsi_configure_device,
1047 	.destroy_device		= pscsi_destroy_device,
1048 	.free_device		= pscsi_free_device,
1049 	.parse_cdb		= pscsi_parse_cdb,
1050 	.set_configfs_dev_params = pscsi_set_configfs_dev_params,
1051 	.show_configfs_dev_params = pscsi_show_configfs_dev_params,
1052 	.get_device_type	= pscsi_get_device_type,
1053 	.get_blocks		= pscsi_get_blocks,
1054 	.tb_dev_attrib_attrs	= passthrough_attrib_attrs,
1055 };
1056 
1057 static int __init pscsi_module_init(void)
1058 {
1059 	return transport_backend_register(&pscsi_ops);
1060 }
1061 
1062 static void __exit pscsi_module_exit(void)
1063 {
1064 	target_backend_unregister(&pscsi_ops);
1065 }
1066 
1067 MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
1068 MODULE_AUTHOR("nab@Linux-iSCSI.org");
1069 MODULE_LICENSE("GPL");
1070 
1071 module_init(pscsi_module_init);
1072 module_exit(pscsi_module_exit);
1073