Lines Matching +full:assoc +full:- +full:select

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * libata-scsi.c - helper library for ATA
5 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
6 * Copyright 2003-2004 Jeff Garzik
9 * as Documentation/driver-api/libata.rst
12 * - http://www.t10.org/
13 * - http://www.t13.org/
38 #include "libata-transport.h"
66 RW_RECOVERY_MPAGE_LEN - 2,
76 CACHE_MPAGE_LEN - 2,
85 CONTROL_MPAGE_LEN - 2,
87 0, /* [QAM+QERR may be 1, see 05-359r1] */
89 0, 30 /* extended self test time, see 05-359r1 */
103 ap = ata_shost_to_port(sdev->host); in ata_scsi_park_show()
105 spin_lock_irq(ap->lock); in ata_scsi_park_show()
108 rc = -ENODEV; in ata_scsi_park_show()
111 if (dev->flags & ATA_DFLAG_NO_UNLOAD) { in ata_scsi_park_show()
112 rc = -EOPNOTSUPP; in ata_scsi_park_show()
116 link = dev->link; in ata_scsi_park_show()
118 if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS && in ata_scsi_park_show()
119 link->eh_context.unloaded_mask & (1 << dev->devno) && in ata_scsi_park_show()
120 time_after(dev->unpark_deadline, now)) in ata_scsi_park_show()
121 msecs = jiffies_to_msecs(dev->unpark_deadline - now); in ata_scsi_park_show()
126 spin_unlock_irq(ap->lock); in ata_scsi_park_show()
145 if (input < -2) in ata_scsi_park_store()
146 return -EINVAL; in ata_scsi_park_store()
148 rc = -EOVERFLOW; in ata_scsi_park_store()
152 ap = ata_shost_to_port(sdev->host); in ata_scsi_park_store()
154 spin_lock_irqsave(ap->lock, flags); in ata_scsi_park_store()
157 rc = -ENODEV; in ata_scsi_park_store()
160 if (dev->class != ATA_DEV_ATA && in ata_scsi_park_store()
161 dev->class != ATA_DEV_ZAC) { in ata_scsi_park_store()
162 rc = -EOPNOTSUPP; in ata_scsi_park_store()
167 if (dev->flags & ATA_DFLAG_NO_UNLOAD) { in ata_scsi_park_store()
168 rc = -EOPNOTSUPP; in ata_scsi_park_store()
172 dev->unpark_deadline = ata_deadline(jiffies, input); in ata_scsi_park_store()
173 dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK; in ata_scsi_park_store()
175 complete(&ap->park_req_pending); in ata_scsi_park_store()
178 case -1: in ata_scsi_park_store()
179 dev->flags &= ~ATA_DFLAG_NO_UNLOAD; in ata_scsi_park_store()
181 case -2: in ata_scsi_park_store()
182 dev->flags |= ATA_DFLAG_NO_UNLOAD; in ata_scsi_park_store()
187 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_park_store()
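The store path excerpted above accepts a head-park timeout in milliseconds (scheduled through ATA_EH_PARK), -1 to clear ATA_DFLAG_NO_UNLOAD and -2 to set it. As a hedged illustration only, here is a small userspace sketch that writes such a value to the device's "unload_heads" sysfs attribute; the exact path is an assumption about the usual libata/SCSI sysfs layout and will differ per device.

/*
 * Minimal userspace sketch: request a 5 second head park on /dev/sda by
 * writing to its "unload_heads" sysfs attribute. The sysfs path is an
 * assumption based on the usual libata layout; error handling is minimal.
 */
#include <stdio.h>

int main(void)
{
	/* >= 0: park for that many milliseconds, -1/-2: toggle NO_UNLOAD */
	const char *attr = "/sys/block/sda/device/unload_heads";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%d\n", 5000);	/* park heads for 5000 ms */
	fclose(f);
	return 0;
}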
214 bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE); in ata_scsi_set_sense()
229 scsi_set_sense_information(cmd->sense_buffer, in ata_scsi_set_sense_information()
234 * ata_scsi_set_passthru_sense_fields - Set ATA fields in sense buffer
235 * @qc: ATA PASS-THROUGH command.
245 struct ata_device *dev = qc->dev; in ata_scsi_set_passthru_sense_fields()
246 struct scsi_cmnd *cmd = qc->scsicmd; in ata_scsi_set_passthru_sense_fields()
247 struct ata_taskfile *tf = &qc->result_tf; in ata_scsi_set_passthru_sense_fields()
248 unsigned char *sb = cmd->sense_buffer; in ata_scsi_set_passthru_sense_fields()
250 if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) { in ata_scsi_set_passthru_sense_fields()
275 desc[3] = tf->error; in ata_scsi_set_passthru_sense_fields()
276 desc[5] = tf->nsect; in ata_scsi_set_passthru_sense_fields()
277 desc[7] = tf->lbal; in ata_scsi_set_passthru_sense_fields()
278 desc[9] = tf->lbam; in ata_scsi_set_passthru_sense_fields()
279 desc[11] = tf->lbah; in ata_scsi_set_passthru_sense_fields()
280 desc[12] = tf->device; in ata_scsi_set_passthru_sense_fields()
281 desc[13] = tf->status; in ata_scsi_set_passthru_sense_fields()
287 if (tf->flags & ATA_TFLAG_LBA48) { in ata_scsi_set_passthru_sense_fields()
289 desc[4] = tf->hob_nsect; in ata_scsi_set_passthru_sense_fields()
290 desc[6] = tf->hob_lbal; in ata_scsi_set_passthru_sense_fields()
291 desc[8] = tf->hob_lbam; in ata_scsi_set_passthru_sense_fields()
292 desc[10] = tf->hob_lbah; in ata_scsi_set_passthru_sense_fields()
297 sb[3] = tf->error; in ata_scsi_set_passthru_sense_fields()
298 sb[4] = tf->status; in ata_scsi_set_passthru_sense_fields()
299 sb[5] = tf->device; in ata_scsi_set_passthru_sense_fields()
300 sb[6] = tf->nsect; in ata_scsi_set_passthru_sense_fields()
301 if (tf->flags & ATA_TFLAG_LBA48) { in ata_scsi_set_passthru_sense_fields()
303 if (tf->hob_nsect) in ata_scsi_set_passthru_sense_fields()
305 if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah) in ata_scsi_set_passthru_sense_fields()
308 sb[9] = tf->lbal; in ata_scsi_set_passthru_sense_fields()
309 sb[10] = tf->lbam; in ata_scsi_set_passthru_sense_fields()
310 sb[11] = tf->lbah; in ata_scsi_set_passthru_sense_fields()
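The excerpts above show the two places a completed ATA PASS-THROUGH command reports its output taskfile: the descriptor-format "ATA Status Return" descriptor (desc[3]..desc[13]) and, for fixed-format sense, bytes 3..11 of the sense buffer. Below is a hedged userspace sketch that decodes the descriptor-format variant; the offsets mirror the assignments above, while the descriptor code 09h and its 0Ch additional length come from SAT, and the struct and function names are illustrative only.

/*
 * Hedged sketch: pull the ATA output registers out of descriptor-format
 * sense data after an ATA PASS-THROUGH command. Offsets follow the
 * "ATA Status Return" descriptor (code 09h) populated above.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

struct ata_status_return {
	uint8_t error, status, device;
	uint8_t nsect, lbal, lbam, lbah;
	uint8_t hob_nsect, hob_lbal, hob_lbam, hob_lbah;
};

static bool parse_ata_status_return(const uint8_t *sense, size_t len,
				    struct ata_status_return *out)
{
	/* descriptor-format sense uses response codes 72h/73h */
	if (len < 8 || (sense[0] & 0x7f) < 0x72)
		return false;

	size_t pos = 8, end = 8 + sense[7];
	if (end > len)
		end = len;

	while (pos + 2 <= end) {
		const uint8_t *d = &sense[pos];

		if (d[0] == 0x09 && pos + 14 <= end) {
			out->error     = d[3];
			out->hob_nsect = d[4];
			out->nsect     = d[5];
			out->hob_lbal  = d[6];
			out->lbal      = d[7];
			out->hob_lbam  = d[8];
			out->lbam      = d[9];
			out->hob_lbah  = d[10];
			out->lbah      = d[11];
			out->device    = d[12];
			out->status    = d[13];
			return true;
		}
		pos += 2 + d[1];	/* skip to the next descriptor */
	}
	return false;
}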
319 scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, in ata_scsi_set_invalid_field()
328 scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, in ata_scsi_set_invalid_parameter()
348 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
378 * ata_scsi_unlock_native_capacity - unlock native capacity
389 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_unlock_native_capacity()
393 spin_lock_irqsave(ap->lock, flags); in ata_scsi_unlock_native_capacity()
396 if (dev && dev->n_sectors < dev->n_native_sectors) { in ata_scsi_unlock_native_capacity()
397 dev->flags |= ATA_DFLAG_UNLOCK_HPA; in ata_scsi_unlock_native_capacity()
398 dev->link->eh_info.action |= ATA_EH_RESET; in ata_scsi_unlock_native_capacity()
402 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_unlock_native_capacity()
408 * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
427 return -ENOMSG; in ata_get_identity()
429 if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16))) in ata_get_identity()
430 return -EFAULT; in ata_get_identity()
432 ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN); in ata_get_identity()
434 return -EFAULT; in ata_get_identity()
436 ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN); in ata_get_identity()
438 return -EFAULT; in ata_get_identity()
440 ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN); in ata_get_identity()
442 return -EFAULT; in ata_get_identity()
448 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
474 return -EINVAL; in ata_cmd_ioctl()
477 return -EFAULT; in ata_cmd_ioctl()
486 rc = -ENOMEM; in ata_cmd_ioctl()
490 scsi_cmd[1] = (4 << 1); /* PIO Data-in */ in ata_cmd_ioctl()
494 scsi_cmd[1] = (3 << 1); /* Non-data */ in ata_cmd_ioctl()
501 if (args[0] == ATA_CMD_SMART) { /* hack -- ide driver does this too */ in ata_cmd_ioctl()
522 /* If we set cc then ATA pass-through will cause a in ata_cmd_ioctl()
537 rc = -EFAULT; in ata_cmd_ioctl()
543 rc = -EIO; in ata_cmd_ioctl()
549 rc = -EFAULT; in ata_cmd_ioctl()
556 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
581 return -EINVAL; in ata_task_ioctl()
584 return -EFAULT; in ata_task_ioctl()
589 scsi_cmd[1] = (3 << 1); /* Non-data */ in ata_task_ioctl()
610 /* If we set cc then ATA pass-through will cause a in ata_task_ioctl()
627 args[6] = desc[12]; /* select */ in ata_task_ioctl()
629 rc = -EFAULT; in ata_task_ioctl()
634 rc = -EIO; in ata_task_ioctl()
644 if (ap->flags & ATA_FLAG_PIO_DMA) in ata_ioc32()
646 if (ap->pflags & ATA_PFLAG_PIO32) in ata_ioc32()
659 int rc = -EINVAL; in ata_sas_scsi_ioctl()
664 spin_lock_irqsave(ap->lock, flags); in ata_sas_scsi_ioctl()
666 spin_unlock_irqrestore(ap->lock, flags); in ata_sas_scsi_ioctl()
676 spin_lock_irqsave(ap->lock, flags); in ata_sas_scsi_ioctl()
677 if (ap->pflags & ATA_PFLAG_PIO32CHANGE) { in ata_sas_scsi_ioctl()
679 ap->pflags |= ATA_PFLAG_PIO32; in ata_sas_scsi_ioctl()
681 ap->pflags &= ~ATA_PFLAG_PIO32; in ata_sas_scsi_ioctl()
684 rc = -EINVAL; in ata_sas_scsi_ioctl()
686 spin_unlock_irqrestore(ap->lock, flags); in ata_sas_scsi_ioctl()
694 return -EACCES; in ata_sas_scsi_ioctl()
699 return -EACCES; in ata_sas_scsi_ioctl()
703 rc = -ENOTTY; in ata_sas_scsi_ioctl()
714 return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host), in ata_scsi_ioctl()
720 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
728 * If a command was available, fill in the SCSI-specific
741 struct ata_port *ap = dev->link->ap; in ata_scsi_qc_new()
748 if (ap->flags & ATA_FLAG_SAS_HOST) { in ata_scsi_qc_new()
751 * unique per-device budget token as a tag. in ata_scsi_qc_new()
753 if (WARN_ON_ONCE(cmd->budget_token >= ATA_MAX_QUEUE)) in ata_scsi_qc_new()
755 tag = cmd->budget_token; in ata_scsi_qc_new()
757 tag = scsi_cmd_to_rq(cmd)->tag; in ata_scsi_qc_new()
761 qc->tag = qc->hw_tag = tag; in ata_scsi_qc_new()
762 qc->ap = ap; in ata_scsi_qc_new()
763 qc->dev = dev; in ata_scsi_qc_new()
767 qc->scsicmd = cmd; in ata_scsi_qc_new()
768 qc->scsidone = scsi_done; in ata_scsi_qc_new()
770 qc->sg = scsi_sglist(cmd); in ata_scsi_qc_new()
771 qc->n_elem = scsi_sg_count(cmd); in ata_scsi_qc_new()
773 if (scsi_cmd_to_rq(cmd)->rq_flags & RQF_QUIET) in ata_scsi_qc_new()
774 qc->flags |= ATA_QCFLAG_QUIET; in ata_scsi_qc_new()
787 struct scsi_cmnd *scmd = qc->scsicmd; in ata_qc_set_pc_nbytes()
789 qc->extrabytes = scmd->extra_len; in ata_qc_set_pc_nbytes()
790 qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes; in ata_qc_set_pc_nbytes()
794 * ata_to_sense_error - convert ATA error to SCSI error
836 /* TRK0 - Track 0 not found */ in ata_to_sense_error()
843 /* SRV/IDNF - ID not found */ in ata_to_sense_error()
846 /* MC - Media Changed */ in ata_to_sense_error()
849 /* ECC - Uncorrectable ECC error */ in ata_to_sense_error()
852 /* BBD - block marked bad */ in ata_to_sense_error()
917 * ata_gen_passthru_sense - Generate check condition sense block.
925 * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
933 struct ata_device *dev = qc->dev; in ata_gen_passthru_sense()
934 struct scsi_cmnd *cmd = qc->scsicmd; in ata_gen_passthru_sense()
935 struct ata_taskfile *tf = &qc->result_tf; in ata_gen_passthru_sense()
938 if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) { in ata_gen_passthru_sense()
948 if (qc->err_mask || in ata_gen_passthru_sense()
949 tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { in ata_gen_passthru_sense()
950 ata_to_sense_error(tf->status, tf->error, in ata_gen_passthru_sense()
952 ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq); in ata_gen_passthru_sense()
955 * ATA PASS-THROUGH INFORMATION AVAILABLE in ata_gen_passthru_sense()
971 * ata_gen_ata_sense - generate a SCSI fixed sense block
982 struct ata_device *dev = qc->dev; in ata_gen_ata_sense()
983 struct scsi_cmnd *cmd = qc->scsicmd; in ata_gen_ata_sense()
984 struct ata_taskfile *tf = &qc->result_tf; in ata_gen_ata_sense()
985 unsigned char *sb = cmd->sense_buffer; in ata_gen_ata_sense()
996 if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) { in ata_gen_ata_sense()
1005 if (qc->err_mask || in ata_gen_ata_sense()
1006 tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { in ata_gen_ata_sense()
1007 ata_to_sense_error(tf->status, tf->error, in ata_gen_ata_sense()
1013 tf->status, qc->err_mask); in ata_gen_ata_sense()
1018 block = ata_tf_read_block(&qc->result_tf, dev); in ata_gen_ata_sense()
1027 sdev->use_10_for_rw = 1; in ata_scsi_sdev_config()
1028 sdev->use_10_for_ms = 1; in ata_scsi_sdev_config()
1029 sdev->no_write_same = 1; in ata_scsi_sdev_config()
1031 /* Schedule policy is determined by ->qc_defer() callback and in ata_scsi_sdev_config()
1036 sdev->max_device_blocked = 1; in ata_scsi_sdev_config()
1040 * ata_scsi_dma_need_drain - Check whether data transfer may overflow
1058 return atapi_cmd_type(scmd->cmnd[0]) == ATAPI_MISC; in ata_scsi_dma_need_drain()
1067 if (!ata_id_has_unload(dev->id)) in ata_scsi_dev_config()
1068 dev->flags |= ATA_DFLAG_NO_UNLOAD; in ata_scsi_dev_config()
1071 dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors); in ata_scsi_dev_config()
1072 lim->max_hw_sectors = dev->max_sectors; in ata_scsi_dev_config()
1074 if (dev->class == ATA_DEV_ATAPI) { in ata_scsi_dev_config()
1075 sdev->sector_size = ATA_SECT_SIZE; in ata_scsi_dev_config()
1078 lim->dma_pad_mask = ATA_DMA_PAD_SZ - 1; in ata_scsi_dev_config()
1081 lim->max_segments--; in ata_scsi_dev_config()
1083 sdev->dma_drain_len = ATAPI_MAX_DRAIN; in ata_scsi_dev_config()
1084 sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO); in ata_scsi_dev_config()
1085 if (!sdev->dma_drain_buf) { in ata_scsi_dev_config()
1087 return -ENOMEM; in ata_scsi_dev_config()
1090 sdev->sector_size = ata_id_logical_sector_size(dev->id); in ata_scsi_dev_config()
1101 sdev->manage_runtime_start_stop = 1; in ata_scsi_dev_config()
1102 sdev->manage_shutdown = 1; in ata_scsi_dev_config()
1103 sdev->force_runtime_start_on_system_start = 1; in ata_scsi_dev_config()
1113 if (sdev->sector_size > PAGE_SIZE) in ata_scsi_dev_config()
1116 sdev->sector_size); in ata_scsi_dev_config()
1118 lim->dma_alignment = sdev->sector_size - 1; in ata_scsi_dev_config()
1120 if (dev->flags & ATA_DFLAG_AN) in ata_scsi_dev_config()
1121 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); in ata_scsi_dev_config()
1124 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id)); in ata_scsi_dev_config()
1128 if (dev->flags & ATA_DFLAG_TRUSTED) in ata_scsi_dev_config()
1129 sdev->security_supported = 1; in ata_scsi_dev_config()
1131 dev->sdev = sdev; in ata_scsi_dev_config()
1136 * ata_scsi_sdev_init - Early setup of SCSI device
1148 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_sdev_init()
1158 link = device_link_add(&sdev->sdev_gendev, &ap->tdev, in ata_scsi_sdev_init()
1163 dev_name(&sdev->sdev_gendev)); in ata_scsi_sdev_init()
1164 return -ENODEV; in ata_scsi_sdev_init()
1172 * ata_scsi_sdev_configure - Set SCSI device attributes
1178 * SCSI mid-layer behaviors.
1186 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_sdev_configure()
1197 * ata_scsi_sdev_destroy - SCSI device is about to be destroyed
1202 * dev->sdev, this function doesn't have to do anything.
1203 * Otherwise, SCSI layer initiated warm-unplug is in progress.
1204 * Clear dev->sdev, schedule the device for ATA detach and invoke
1212 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_sdev_destroy()
1216 device_link_remove(&sdev->sdev_gendev, &ap->tdev); in ata_scsi_sdev_destroy()
1218 spin_lock_irqsave(ap->lock, flags); in ata_scsi_sdev_destroy()
1220 if (dev && dev->sdev) { in ata_scsi_sdev_destroy()
1222 dev->sdev = NULL; in ata_scsi_sdev_destroy()
1223 dev->flags |= ATA_DFLAG_DETACH; in ata_scsi_sdev_destroy()
1226 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_sdev_destroy()
1228 kfree(sdev->dma_drain_buf); in ata_scsi_sdev_destroy()
1233 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
1245 * Zero on success, non-zero on error.
1249 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_start_stop_xlat()
1250 const u8 *cdb = scmd->cmnd; in ata_scsi_start_stop_xlat()
1254 if (scmd->cmd_len < 5) { in ata_scsi_start_stop_xlat()
1273 /* Ignore IMMED bit (cdb[1] & 0x1), violates sat-r05 */ in ata_scsi_start_stop_xlat()
1274 if (!ata_dev_power_init_tf(qc->dev, &qc->tf, cdb[4] & 0x1)) { in ata_scsi_start_stop_xlat()
1275 ata_scsi_set_sense(qc->dev, scmd, ABORTED_COMMAND, 0, 0); in ata_scsi_start_stop_xlat()
1283 * MODE SELECT to be implemented. in ata_scsi_start_stop_xlat()
1289 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); in ata_scsi_start_stop_xlat()
1294 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
1304 * Zero on success, non-zero on error.
1308 struct ata_taskfile *tf = &qc->tf; in ata_scsi_flush_xlat()
1310 tf->flags |= ATA_TFLAG_DEVICE; in ata_scsi_flush_xlat()
1311 tf->protocol = ATA_PROT_NODATA; in ata_scsi_flush_xlat()
1313 if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT) in ata_scsi_flush_xlat()
1314 tf->command = ATA_CMD_FLUSH_EXT; in ata_scsi_flush_xlat()
1316 tf->command = ATA_CMD_FLUSH; in ata_scsi_flush_xlat()
1319 qc->flags |= ATA_QCFLAG_IO; in ata_scsi_flush_xlat()
1325 * scsi_6_lba_len - Get LBA and transfer length
1328 * Calculate LBA and transfer length for 6-byte commands.
1341 * scsi_10_lba_len - Get LBA and transfer length
1344 * Calculate LBA and transfer length for 10-byte commands.
1357 * scsi_16_lba_len - Get LBA and transfer length
1360 * Calculate LBA and transfer length for 16-byte commands.
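scsi_6_lba_len(), scsi_10_lba_len() and scsi_16_lba_len() extract the starting LBA and transfer length from the different CDB sizes; their bodies are not part of this listing. A minimal sketch of the same decoding, assuming the standard SBC CDB layouts:

/*
 * Illustrative re-implementations of the CDB decoding performed by the
 * helpers above, assuming standard SBC layouts:
 *   6-byte : 21-bit LBA in bytes 1..3, length in byte 4 (0 means 256)
 *   10-byte: 32-bit LBA in bytes 2..5, 16-bit length in bytes 7..8
 *   16-byte: 64-bit LBA in bytes 2..9, 32-bit length in bytes 10..13
 */
#include <stdint.h>

static void cdb6_lba_len(const uint8_t *cdb, uint64_t *lba, uint32_t *len)
{
	*lba = ((uint64_t)(cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
	*len = cdb[4];		/* a value of 0 means 256 blocks */
}

static void cdb10_lba_len(const uint8_t *cdb, uint64_t *lba, uint32_t *len)
{
	*lba = ((uint64_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
	       (cdb[4] << 8) | cdb[5];
	*len = (cdb[7] << 8) | cdb[8];
}

static void cdb16_lba_len(const uint8_t *cdb, uint64_t *lba, uint32_t *len)
{
	uint64_t b = 0;
	uint32_t n = 0;
	int i;

	for (i = 2; i <= 9; i++)
		b = (b << 8) | cdb[i];
	for (i = 10; i <= 13; i++)
		n = (n << 8) | cdb[i];
	*lba = b;
	*len = n;
}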
1373 * scsi_dld - Get duration limit descriptor index
1385 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
1394 * Zero on success, non-zero on error.
1398 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_verify_xlat()
1399 struct ata_taskfile *tf = &qc->tf; in ata_scsi_verify_xlat()
1400 struct ata_device *dev = qc->dev; in ata_scsi_verify_xlat()
1401 u64 dev_sectors = qc->dev->n_sectors; in ata_scsi_verify_xlat()
1402 const u8 *cdb = scmd->cmnd; in ata_scsi_verify_xlat()
1407 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in ata_scsi_verify_xlat()
1408 tf->protocol = ATA_PROT_NODATA; in ata_scsi_verify_xlat()
1412 if (scmd->cmd_len < 10) { in ata_scsi_verify_xlat()
1419 if (scmd->cmd_len < 16) { in ata_scsi_verify_xlat()
1437 if (dev->flags & ATA_DFLAG_LBA) { in ata_scsi_verify_xlat()
1438 tf->flags |= ATA_TFLAG_LBA; in ata_scsi_verify_xlat()
1442 tf->command = ATA_CMD_VERIFY; in ata_scsi_verify_xlat()
1443 tf->device |= (block >> 24) & 0xf; in ata_scsi_verify_xlat()
1445 if (!(dev->flags & ATA_DFLAG_LBA48)) in ata_scsi_verify_xlat()
1449 tf->flags |= ATA_TFLAG_LBA48; in ata_scsi_verify_xlat()
1450 tf->command = ATA_CMD_VERIFY_EXT; in ata_scsi_verify_xlat()
1452 tf->hob_nsect = (n_block >> 8) & 0xff; in ata_scsi_verify_xlat()
1454 tf->hob_lbah = (block >> 40) & 0xff; in ata_scsi_verify_xlat()
1455 tf->hob_lbam = (block >> 32) & 0xff; in ata_scsi_verify_xlat()
1456 tf->hob_lbal = (block >> 24) & 0xff; in ata_scsi_verify_xlat()
1461 tf->nsect = n_block & 0xff; in ata_scsi_verify_xlat()
1463 tf->lbah = (block >> 16) & 0xff; in ata_scsi_verify_xlat()
1464 tf->lbam = (block >> 8) & 0xff; in ata_scsi_verify_xlat()
1465 tf->lbal = block & 0xff; in ata_scsi_verify_xlat()
1467 tf->device |= ATA_LBA; in ata_scsi_verify_xlat()
1476 track = (u32)block / dev->sectors; in ata_scsi_verify_xlat()
1477 cyl = track / dev->heads; in ata_scsi_verify_xlat()
1478 head = track % dev->heads; in ata_scsi_verify_xlat()
1479 sect = (u32)block % dev->sectors + 1; in ata_scsi_verify_xlat()
1482 Cylinder: 0-65535 in ata_scsi_verify_xlat()
1483 Head: 0-15 in ata_scsi_verify_xlat()
1484 Sector: 1-255 */ in ata_scsi_verify_xlat()
1488 tf->command = ATA_CMD_VERIFY; in ata_scsi_verify_xlat()
1489 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ in ata_scsi_verify_xlat()
1490 tf->lbal = sect; in ata_scsi_verify_xlat()
1491 tf->lbam = cyl; in ata_scsi_verify_xlat()
1492 tf->lbah = cyl >> 8; in ata_scsi_verify_xlat()
1493 tf->device |= head; in ata_scsi_verify_xlat()
1499 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); in ata_scsi_verify_xlat()
1503 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0); in ata_scsi_verify_xlat()
1508 scmd->result = SAM_STAT_GOOD; in ata_scsi_verify_xlat()
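For drives without LBA support, the branch above falls back to CHS addressing: track = LBA / sectors-per-track, cylinder = track / heads, head = track % heads, and sector = remainder + 1 (sectors are 1-based). A standalone sketch of that arithmetic, with placeholder geometry values:

/*
 * Standalone sketch of the LBA -> CHS conversion used in the CHS fallback
 * above. The geometry (heads, sectors per track) is device-specific; the
 * values here are placeholders only.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t lba = 1000000;			/* block to convert (placeholder) */
	uint32_t heads = 16, sectors = 63;	/* drive geometry (placeholder) */

	uint32_t track = (uint32_t)(lba / sectors);
	uint32_t cyl   = track / heads;
	uint32_t head  = track % heads;
	uint32_t sect  = (uint32_t)(lba % sectors) + 1;	/* sectors are 1-based */

	/* The taskfile can only address cyl 0-65535, head 0-15, sect 1-255 */
	printf("LBA %llu -> C/H/S %u/%u/%u\n",
	       (unsigned long long)lba, cyl, head, sect);
	return 0;
}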
1520 req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size; in ata_check_nblocks()
1528 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
1543 * Zero on success, non-zero on error.
1547 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_rw_xlat()
1548 const u8 *cdb = scmd->cmnd; in ata_scsi_rw_xlat()
1570 if (unlikely(scmd->cmd_len < 10)) { in ata_scsi_rw_xlat()
1582 if (unlikely(scmd->cmd_len < 6)) { in ata_scsi_rw_xlat()
1588 /* for 6-byte r/w commands, transfer length 0 in ata_scsi_rw_xlat()
1598 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_rw_xlat()
1616 /* For 10-byte and 16-byte SCSI R/W commands, transfer in ata_scsi_rw_xlat()
1625 qc->flags |= ATA_QCFLAG_IO; in ata_scsi_rw_xlat()
1626 qc->nbytes = n_block * scmd->device->sector_size; in ata_scsi_rw_xlat()
1632 if (rc == -ERANGE) in ata_scsi_rw_xlat()
1634 /* treat all other errors as -EINVAL, fall through */ in ata_scsi_rw_xlat()
1636 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); in ata_scsi_rw_xlat()
1640 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0); in ata_scsi_rw_xlat()
1645 scmd->result = SAM_STAT_GOOD; in ata_scsi_rw_xlat()
1651 struct scsi_cmnd *cmd = qc->scsicmd; in ata_qc_done()
1652 void (*done)(struct scsi_cmnd *) = qc->scsidone; in ata_qc_done()
1660 struct scsi_cmnd *cmd = qc->scsicmd; in ata_scsi_qc_complete()
1661 u8 *cdb = cmd->cmnd; in ata_scsi_qc_complete()
1662 bool have_sense = qc->flags & ATA_QCFLAG_SENSE_VALID; in ata_scsi_qc_complete()
1665 bool is_error = qc->err_mask != 0; in ata_scsi_qc_complete()
1674 * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE in ata_scsi_qc_complete()
1681 set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION); in ata_scsi_qc_complete()
1690 * ata_scsi_translate - Translate then issue SCSI command to ATA device
1695 * Our ->queuecommand() function has decided that the SCSI
1704 * then cmd->result (and possibly cmd->sense_buffer) are assumed
1718 struct ata_port *ap = dev->link->ap; in ata_scsi_translate()
1726 /* data is present; dma-map it */ in ata_scsi_translate()
1727 if (cmd->sc_data_direction == DMA_FROM_DEVICE || in ata_scsi_translate()
1728 cmd->sc_data_direction == DMA_TO_DEVICE) { in ata_scsi_translate()
1736 qc->dma_dir = cmd->sc_data_direction; in ata_scsi_translate()
1739 qc->complete_fn = ata_scsi_qc_complete; in ata_scsi_translate()
1744 if (ap->ops->qc_defer) { in ata_scsi_translate()
1745 if ((rc = ap->ops->qc_defer(qc))) in ata_scsi_translate()
1749 /* select device, send command to hardware */ in ata_scsi_translate()
1761 cmd->result = (DID_ERROR << 16); in ata_scsi_translate()
1775 * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
1784 * completed successfully (0), or not (in which case cmd->result
1804 cmd->result = SAM_STAT_GOOD; in ata_scsi_rbuf_fill()
1806 scsi_set_resid(cmd, scsi_bufflen(cmd) - len); in ata_scsi_rbuf_fill()
1813 * ata_scsiop_inq_std - Simulate standard INQUIRY command
1819 * with non-VPD INQUIRY command output.
1829 0x60, /* SAM-3 (no version claimed) */ in ata_scsiop_inq_std()
1832 0x20, /* SBC-2 (no version claimed) */ in ata_scsiop_inq_std()
1835 0x00 /* SPC-3 (no version claimed) */ in ata_scsiop_inq_std()
1839 0xA0, /* SAM-5 (no version claimed) */ in ata_scsiop_inq_std()
1842 0x00, /* SBC-4 (no version claimed) */ in ata_scsiop_inq_std()
1845 0xC0, /* SPC-5 (no version claimed) */ in ata_scsiop_inq_std()
1854 0x5, /* claim SPC-3 version compatibility */ in ata_scsiop_inq_std()
1856 95 - 4, in ata_scsiop_inq_std()
1864 * device bit (obsolete since ATA-8 ACS) is set. in ata_scsiop_inq_std()
1866 if (ata_id_removable(dev->id)) in ata_scsiop_inq_std()
1869 if (dev->class == ATA_DEV_ZAC) { in ata_scsiop_inq_std()
1871 hdr[2] = 0x7; /* claim SPC-5 version compatibility */ in ata_scsiop_inq_std()
1874 if (dev->flags & ATA_DFLAG_CDL) in ata_scsiop_inq_std()
1875 hdr[2] = 0xd; /* claim SPC-6 version compatibility */ in ata_scsiop_inq_std()
1879 ata_id_string(dev->id, &rbuf[16], ATA_ID_PROD, 16); in ata_scsiop_inq_std()
1882 ata_id_string(dev->id, &rbuf[32], ATA_ID_FW_REV + 2, 4); in ata_scsiop_inq_std()
1884 ata_id_string(dev->id, &rbuf[32], ATA_ID_FW_REV, 4); in ata_scsiop_inq_std()
1889 if (ata_id_zoned_cap(dev->id) || dev->class == ATA_DEV_ZAC) in ata_scsiop_inq_std()
1902 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
1930 !(dev->flags & ATA_DFLAG_ZAC)) in ata_scsiop_inq_00()
1941 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
1962 ata_id_string(dev->id, (unsigned char *) &rbuf[4], in ata_scsiop_inq_80()
1969 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
1975 * - vendor specific ASCII containing the ATA serial number
1976 * - SAT defined "t10 vendor id based" containing ASCII vendor
1991 /* piv=0, assoc=lu, code_set=ASCII, designator=vendor */ in ata_scsiop_inq_83()
1995 ata_id_string(dev->id, (unsigned char *) rbuf + num, in ata_scsiop_inq_83()
2000 /* piv=0, assoc=lu, code_set=ASCII, designator=t10 vendor id */ in ata_scsiop_inq_83()
2007 ata_id_string(dev->id, (unsigned char *) rbuf + num, ATA_ID_PROD, in ata_scsiop_inq_83()
2010 ata_id_string(dev->id, (unsigned char *) rbuf + num, ATA_ID_SERNO, in ata_scsiop_inq_83()
2014 if (ata_id_has_wwn(dev->id)) { in ata_scsiop_inq_83()
2016 /* piv=0, assoc=lu, code_set=binary, designator=NAA */ in ata_scsiop_inq_83()
2021 ata_id_string(dev->id, (unsigned char *) rbuf + num, in ata_scsiop_inq_83()
2025 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */ in ata_scsiop_inq_83()
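The device identification page built above carries three designators (a vendor-specific ASCII serial number, a T10 vendor-id based designator, and an NAA designator when the drive reports a WWN) and stores the page length in rbuf[3]. Below is a hedged sketch of how a consumer would walk those designation descriptors; the offsets follow SPC and the function name is illustrative.

/*
 * Hedged sketch: walk the designation descriptors of an INQUIRY VPD page
 * 83h buffer such as the one built above. Field offsets follow SPC; the
 * printing is purely illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static void walk_vpd83(const uint8_t *page, size_t buflen)
{
	/* page[2..3] hold the page length; the code above fills page[3] only */
	size_t page_len = (page[2] << 8) | page[3];
	size_t pos = 4, end = 4 + page_len;

	if (end > buflen)
		end = buflen;

	while (pos + 4 <= end) {
		uint8_t code_set   = page[pos] & 0x0f;
		uint8_t assoc      = (page[pos + 1] >> 4) & 0x03;
		uint8_t desig_type = page[pos + 1] & 0x0f;
		uint8_t desig_len  = page[pos + 3];

		printf("assoc=%u type=%u code_set=%u len=%u\n",
		       assoc, desig_type, code_set, desig_len);
		pos += 4 + desig_len;
	}
}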
2031 * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
2036 * Yields SAT-specified ATA VPD page.
2063 memcpy(&rbuf[60], &dev->id[0], 512); in ata_scsiop_inq_89()
2069 * ata_scsiop_inq_b0 - Simulate INQUIRY VPD page B0, Block Limits
2094 min_io_sectors = 1 << ata_id_log2_per_physical_sector(dev->id); in ata_scsiop_inq_b0()
2103 * that we support some form of unmap - in this case via WRITE SAME in ata_scsiop_inq_b0()
2106 if (ata_id_has_trim(dev->id)) { in ata_scsiop_inq_b0()
2109 if (dev->quirks & ATA_QUIRK_MAX_TRIM_128M) in ata_scsiop_inq_b0()
2110 max_blocks = 128 << (20 - SECTOR_SHIFT); in ata_scsiop_inq_b0()
2120 * ata_scsiop_inq_b1 - Simulate INQUIRY VPD page B1, Block Device
2134 int form_factor = ata_id_form_factor(dev->id); in ata_scsiop_inq_b1()
2135 int media_rotation_rate = ata_id_rotation_rate(dev->id); in ata_scsiop_inq_b1()
2136 u8 zoned = ata_id_zoned_cap(dev->id); in ata_scsiop_inq_b1()
2150 * ata_scsiop_inq_b2 - Simulate INQUIRY VPD page B2, Logical Block
2164 /* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */ in ata_scsiop_inq_b2()
2173 * ata_scsiop_inq_b6 - Simulate INQUIRY VPD page B6, Zoned Block Device
2187 if (!(dev->flags & ATA_DFLAG_ZAC)) { in ata_scsiop_inq_b6()
2193 * zbc-r05 SCSI Zoned Block device characteristics VPD page in ata_scsiop_inq_b6()
2199 * URSWRZ bit is only meaningful for host-managed ZAC drives in ata_scsiop_inq_b6()
2201 if (dev->zac_zoned_cap & 1) in ata_scsiop_inq_b6()
2203 put_unaligned_be32(dev->zac_zones_optimal_open, &rbuf[8]); in ata_scsiop_inq_b6()
2204 put_unaligned_be32(dev->zac_zones_optimal_nonseq, &rbuf[12]); in ata_scsiop_inq_b6()
2205 put_unaligned_be32(dev->zac_zones_max_open, &rbuf[16]); in ata_scsiop_inq_b6()
2211 * ata_scsiop_inq_b9 - Simulate INQUIRY VPD page B9, Concurrent Positioning
2225 struct ata_cpr_log *cpr_log = dev->cpr_log; in ata_scsiop_inq_b9()
2234 /* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */ in ata_scsiop_inq_b9()
2236 put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]); in ata_scsiop_inq_b9()
2238 for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) { in ata_scsiop_inq_b9()
2239 desc[0] = cpr_log->cpr[i].num; in ata_scsiop_inq_b9()
2240 desc[1] = cpr_log->cpr[i].num_storage_elements; in ata_scsiop_inq_b9()
2241 put_unaligned_be64(cpr_log->cpr[i].start_lba, &desc[8]); in ata_scsiop_inq_b9()
2242 put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]); in ata_scsiop_inq_b9()
2249 * ata_scsiop_inquiry - Simulate INQUIRY command
2262 const u8 *scsicmd = cmd->cmnd; in ata_scsiop_inquiry()
2300 * modecpy - Prepare response for MODE SENSE
2316 memset(dest + 2, 0, n - 2); in modecpy()
2323 * ata_msense_caching - Simulate MODE SENSE caching info page
2348 * Simulate MODE SENSE control mode page, sub-page 0.
2359 bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE); in ata_msense_control_spg0()
2370 * using the t2cdlunits 0xa (10ms). Since the SCSI duration limits are 2-bytes
2381 * Simulate MODE SENSE control mode page, sub-pages 07h and 08h
2391 if (!(dev->flags & ATA_DFLAG_CDL) || !dev->cdl) in ata_msense_control_spgt2()
2394 cdl = dev->cdl->desc_log_buf; in ata_msense_control_spgt2()
2403 put_unaligned_be16(CDL_T2_SUB_MPAGE_LEN - 4, &buf[2]); in ata_msense_control_spgt2()
2440 * Simulate MODE SENSE control mode page, sub-page f2h
2454 put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]); in ata_msense_control_ata_feature()
2456 if (dev->flags & ATA_DFLAG_CDL) in ata_msense_control_ata_feature()
2465 * ata_msense_control - Simulate MODE SENSE control mode page
2468 * @spg: sub-page code
2501 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
2518 * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
2533 u8 *scsicmd = cmd->cmnd, *p = rbuf; in ata_scsiop_mode_sense()
2573 * Supported subpages: all subpages and sub-pages 07h, 08h and f2h of in ata_scsiop_mode_sense()
2583 if (dev->flags & ATA_DFLAG_CDL && pg == CONTROL_MPAGE) in ata_scsiop_mode_sense()
2598 p += ata_msense_caching(dev->id, p, page_control == 1); in ata_scsiop_mode_sense()
2607 p += ata_msense_caching(dev->id, p, page_control == 1); in ata_scsiop_mode_sense()
2616 if (dev->flags & ATA_DFLAG_FUA) in ata_scsiop_mode_sense()
2620 rbuf[0] = p - rbuf - 1; in ata_scsiop_mode_sense()
2630 put_unaligned_be16(p - rbuf - 2, &rbuf[0]); in ata_scsiop_mode_sense()
2650 * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
2663 u8 *scsicmd = cmd->cmnd; in ata_scsiop_read_cap()
2664 u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */ in ata_scsiop_read_cap()
2669 sector_size = ata_id_logical_sector_size(dev->id); in ata_scsiop_read_cap()
2670 log2_per_phys = ata_id_log2_per_physical_sector(dev->id); in ata_scsiop_read_cap()
2671 lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys); in ata_scsiop_read_cap()
2677 /* sector count, 32-bit */ in ata_scsiop_read_cap()
2702 /* sector count, 64-bit */ in ata_scsiop_read_cap()
2718 if (ata_id_zoned_cap(dev->id) || dev->class == ATA_DEV_ZAC) in ata_scsiop_read_cap()
2724 if (ata_id_has_trim(dev->id) && !(dev->quirks & ATA_QUIRK_NOTRIM)) { in ata_scsiop_read_cap()
2727 if (ata_id_has_zero_after_trim(dev->id) && in ata_scsiop_read_cap()
2728 dev->quirks & ATA_QUIRK_ZERO_AFTER_TRIM) { in ata_scsiop_read_cap()
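The READ CAPACITY (16) simulation above reports the last LBA, the logical block size, the logical-per-physical exponent, the lowest aligned LBA and, for TRIM-capable drives, the LBPME/LBPRZ bits. A hedged sketch of decoding that 32-byte parameter data on the initiator side; the offsets follow SBC and the buffer contents here are placeholders.

/*
 * Hedged sketch: decode the 32-byte READ CAPACITY (16) parameter data the
 * simulation above produces. Offsets follow SBC; names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t buf[32] = { 0 };	/* would come from the device */
	uint64_t last_lba = 0;
	uint32_t block_len;
	int i;

	for (i = 0; i < 8; i++)
		last_lba = (last_lba << 8) | buf[i];
	block_len = ((uint32_t)buf[8] << 24) | (buf[9] << 16) |
		    (buf[10] << 8) | buf[11];

	unsigned log2_per_phys  = buf[13] & 0x0f;	/* logical per physical */
	unsigned lowest_aligned = ((buf[14] & 0x3f) << 8) | buf[15];
	unsigned lbpme = (buf[14] >> 7) & 1;	/* unmap (TRIM) supported */
	unsigned lbprz = (buf[14] >> 6) & 1;	/* reads zeros after unmap */

	printf("capacity=%llu blocks of %u bytes, %u logical per physical,\n"
	       "lowest aligned LBA=%u, LBPME=%u, LBPRZ=%u\n",
	       (unsigned long long)(last_lba + 1), block_len,
	       1u << log2_per_phys, lowest_aligned, lbpme, lbprz);
	return 0;
}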
2738 * ata_scsiop_report_luns - Simulate REPORT LUNS command
2760 * 1) Fake MMC-5 version, to indicate to the Linux scsi midlayer this is a
2778 struct scsi_cmnd *cmd = qc->scsicmd; in atapi_qc_complete()
2779 unsigned int err_mask = qc->err_mask; in atapi_qc_complete()
2782 if (unlikely(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID)) { in atapi_qc_complete()
2784 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) in atapi_qc_complete()
2787 /* SCSI EH automatically locks door if sdev->locked is in atapi_qc_complete()
2790 * creates a loop - SCSI EH issues door lock which in atapi_qc_complete()
2794 * If door lock fails, always clear sdev->locked to in atapi_qc_complete()
2798 * sure qc->dev->sdev isn't NULL before dereferencing. in atapi_qc_complete()
2800 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev) in atapi_qc_complete()
2801 qc->dev->sdev->locked = 0; in atapi_qc_complete()
2803 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; in atapi_qc_complete()
2809 if (cmd->cmnd[0] == INQUIRY && (cmd->cmnd[1] & 0x03) == 0) in atapi_qc_complete()
2811 cmd->result = SAM_STAT_GOOD; in atapi_qc_complete()
2816 * atapi_xlat - Initialize PACKET taskfile
2823 * Zero on success, non-zero on failure.
2827 struct scsi_cmnd *scmd = qc->scsicmd; in atapi_xlat()
2828 struct ata_device *dev = qc->dev; in atapi_xlat()
2829 int nodata = (scmd->sc_data_direction == DMA_NONE); in atapi_xlat()
2830 int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO); in atapi_xlat()
2833 memset(qc->cdb, 0, dev->cdb_len); in atapi_xlat()
2834 memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len); in atapi_xlat()
2836 qc->complete_fn = atapi_qc_complete; in atapi_xlat()
2838 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in atapi_xlat()
2839 if (scmd->sc_data_direction == DMA_TO_DEVICE) { in atapi_xlat()
2840 qc->tf.flags |= ATA_TFLAG_WRITE; in atapi_xlat()
2843 qc->tf.command = ATA_CMD_PACKET; in atapi_xlat()
2884 qc->tf.lbam = (nbytes & 0xFF); in atapi_xlat()
2885 qc->tf.lbah = (nbytes >> 8); in atapi_xlat()
2888 qc->tf.protocol = ATAPI_PROT_NODATA; in atapi_xlat()
2890 qc->tf.protocol = ATAPI_PROT_PIO; in atapi_xlat()
2893 qc->tf.protocol = ATAPI_PROT_DMA; in atapi_xlat()
2894 qc->tf.feature |= ATAPI_PKT_DMA; in atapi_xlat()
2896 if ((dev->flags & ATA_DFLAG_DMADIR) && in atapi_xlat()
2897 (scmd->sc_data_direction != DMA_TO_DEVICE)) in atapi_xlat()
2899 qc->tf.feature |= ATAPI_DMADIR; in atapi_xlat()
2911 * For the non-PMP case, ata_link_max_devices() returns 1 (SATA case), in ata_find_dev()
2919 int link_max_devices = ata_link_max_devices(&ap->link); in ata_find_dev()
2922 return &ap->link.device[0]; in ata_find_dev()
2925 return &ap->link.device[devno]; in ata_find_dev()
2931 * For PMP-attached devices, the device number corresponds to C in ata_find_dev()
2935 if (devno < ap->nr_pmp_links) in ata_find_dev()
2936 return &ap->pmp_link[devno].device[0]; in ata_find_dev()
2948 if (unlikely(scsidev->channel || scsidev->lun)) in __ata_scsi_find_dev()
2950 devno = scsidev->id; in __ata_scsi_find_dev()
2952 if (unlikely(scsidev->id || scsidev->lun)) in __ata_scsi_find_dev()
2954 devno = scsidev->channel; in __ata_scsi_find_dev()
2961 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd
2988 * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value.
2989 * @byte1: Byte 1 from pass-thru CDB.
2998 case 3: /* Non-data */ in ata_scsi_map_proto()
3002 case 10: /* UDMA Data-in */ in ata_scsi_map_proto()
3003 case 11: /* UDMA Data-Out */ in ata_scsi_map_proto()
3006 case 4: /* PIO Data-in */ in ata_scsi_map_proto()
3007 case 5: /* PIO Data-out */ in ata_scsi_map_proto()
3027 * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
3030 * Handles either 12, 16, or 32-byte versions of the CDB.
3033 * Zero on success, non-zero on failure.
3037 struct ata_taskfile *tf = &(qc->tf); in ata_scsi_pass_thru()
3038 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_pass_thru()
3039 struct ata_device *dev = qc->dev; in ata_scsi_pass_thru()
3040 const u8 *cdb = scmd->cmnd; in ata_scsi_pass_thru()
3044 /* 7Fh variable length cmd means an ata pass-thru(32) */ in ata_scsi_pass_thru()
3048 tf->protocol = ata_scsi_map_proto(cdb[1 + cdb_offset]); in ata_scsi_pass_thru()
3049 if (tf->protocol == ATA_PROT_UNKNOWN) { in ata_scsi_pass_thru()
3059 if (scmd->sc_data_direction != DMA_NONE) { in ata_scsi_pass_thru()
3064 if (ata_is_ncq(tf->protocol)) in ata_scsi_pass_thru()
3065 tf->protocol = ATA_PROT_NCQ_NODATA; in ata_scsi_pass_thru()
3069 tf->flags |= ATA_TFLAG_LBA; in ata_scsi_pass_thru()
3078 * 16-byte CDB - may contain extended commands. in ata_scsi_pass_thru()
3083 tf->hob_feature = cdb[3]; in ata_scsi_pass_thru()
3084 tf->hob_nsect = cdb[5]; in ata_scsi_pass_thru()
3085 tf->hob_lbal = cdb[7]; in ata_scsi_pass_thru()
3086 tf->hob_lbam = cdb[9]; in ata_scsi_pass_thru()
3087 tf->hob_lbah = cdb[11]; in ata_scsi_pass_thru()
3088 tf->flags |= ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
3090 tf->flags &= ~ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
3095 tf->feature = cdb[4]; in ata_scsi_pass_thru()
3096 tf->nsect = cdb[6]; in ata_scsi_pass_thru()
3097 tf->lbal = cdb[8]; in ata_scsi_pass_thru()
3098 tf->lbam = cdb[10]; in ata_scsi_pass_thru()
3099 tf->lbah = cdb[12]; in ata_scsi_pass_thru()
3100 tf->device = cdb[13]; in ata_scsi_pass_thru()
3101 tf->command = cdb[14]; in ata_scsi_pass_thru()
3105 * 12-byte CDB - incapable of extended commands. in ata_scsi_pass_thru()
3107 tf->flags &= ~ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
3109 tf->feature = cdb[3]; in ata_scsi_pass_thru()
3110 tf->nsect = cdb[4]; in ata_scsi_pass_thru()
3111 tf->lbal = cdb[5]; in ata_scsi_pass_thru()
3112 tf->lbam = cdb[6]; in ata_scsi_pass_thru()
3113 tf->lbah = cdb[7]; in ata_scsi_pass_thru()
3114 tf->device = cdb[8]; in ata_scsi_pass_thru()
3115 tf->command = cdb[9]; in ata_scsi_pass_thru()
3119 * 32-byte CDB - may contain extended command fields. in ata_scsi_pass_thru()
3124 tf->hob_feature = cdb[20]; in ata_scsi_pass_thru()
3125 tf->hob_nsect = cdb[22]; in ata_scsi_pass_thru()
3126 tf->hob_lbal = cdb[16]; in ata_scsi_pass_thru()
3127 tf->hob_lbam = cdb[15]; in ata_scsi_pass_thru()
3128 tf->hob_lbah = cdb[14]; in ata_scsi_pass_thru()
3129 tf->flags |= ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
3131 tf->flags &= ~ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
3133 tf->feature = cdb[21]; in ata_scsi_pass_thru()
3134 tf->nsect = cdb[23]; in ata_scsi_pass_thru()
3135 tf->lbal = cdb[19]; in ata_scsi_pass_thru()
3136 tf->lbam = cdb[18]; in ata_scsi_pass_thru()
3137 tf->lbah = cdb[17]; in ata_scsi_pass_thru()
3138 tf->device = cdb[24]; in ata_scsi_pass_thru()
3139 tf->command = cdb[25]; in ata_scsi_pass_thru()
3140 tf->auxiliary = get_unaligned_be32(&cdb[28]); in ata_scsi_pass_thru()
3145 if (ata_is_ncq(tf->protocol)) in ata_scsi_pass_thru()
3146 tf->nsect = qc->hw_tag << 3; in ata_scsi_pass_thru()
3149 tf->device = dev->devno ? in ata_scsi_pass_thru()
3150 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; in ata_scsi_pass_thru()
3152 switch (tf->command) { in ata_scsi_pass_thru()
3153 /* READ/WRITE LONG use a non-standard sect_size */ in ata_scsi_pass_thru()
3158 if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) { in ata_scsi_pass_thru()
3162 qc->sect_size = scsi_bufflen(scmd); in ata_scsi_pass_thru()
3196 qc->sect_size = scmd->device->sector_size; in ata_scsi_pass_thru()
3201 qc->sect_size = ATA_SECT_SIZE; in ata_scsi_pass_thru()
3209 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in ata_scsi_pass_thru()
3210 if (scmd->sc_data_direction == DMA_TO_DEVICE) in ata_scsi_pass_thru()
3211 tf->flags |= ATA_TFLAG_WRITE; in ata_scsi_pass_thru()
3213 qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; in ata_scsi_pass_thru()
3224 if (tf->protocol == ATA_PROT_DMA && !ata_dma_enabled(dev)) { in ata_scsi_pass_thru()
3230 if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) { in ata_scsi_pass_thru()
3247 if (multi_count != dev->multi_count) in ata_scsi_pass_thru()
3253 * Filter SET_FEATURES - XFER MODE command -- otherwise, in ata_scsi_pass_thru()
3254 * SET_FEATURES - XFER MODE must be preceded/succeeded in ata_scsi_pass_thru()
3255 * by an update to hardware-specific registers for each in ata_scsi_pass_thru()
3256 * controller (i.e. the reason for ->set_piomode(), in ata_scsi_pass_thru()
3257 * ->set_dmamode(), and ->post_set_mode() hooks). in ata_scsi_pass_thru()
3259 if (tf->command == ATA_CMD_SET_FEATURES && in ata_scsi_pass_thru()
3260 tf->feature == SETFEATURES_XFER) { in ata_scsi_pass_thru()
3280 if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) { in ata_scsi_pass_thru()
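The fragments above show how the SATL turns the 12, 16 and 32-byte ATA PASS-THROUGH CDBs into a taskfile, and the HDIO ioctl helpers earlier in the file build the same CDBs themselves (e.g. (4 << 1) for PIO Data-in). Below is a hedged userspace sketch issuing IDENTIFY DEVICE through ATA PASS-THROUGH (12) with the SG_IO ioctl; the byte 2 encoding (T_DIR/BYT_BLOK/T_LENGTH) is taken from SAT, not from this listing.

/*
 * Hedged userspace sketch: send IDENTIFY DEVICE via ATA PASS-THROUGH (12)
 * using the SG_IO ioctl. CDB byte 1 selects PIO Data-in (4 << 1); byte 2
 * (0x0e) asks for data-in, with the transfer length in the sector count
 * field, counted in blocks (per SAT, not taken from this listing).
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(int argc, char **argv)
{
	unsigned char cdb[12] = { 0xa1 };	/* ATA PASS-THROUGH (12) */
	unsigned char id[512], sense[32];
	struct sg_io_hdr io;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s /dev/sdX\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	cdb[1] = 4 << 1;	/* protocol: PIO Data-in */
	cdb[2] = 0x0e;		/* T_DIR=1 (from device), BYT_BLOK=1, T_LENGTH=2 */
	cdb[4] = 1;		/* sector count: one 512-byte block */
	cdb[9] = 0xec;		/* ATA command: IDENTIFY DEVICE */

	memset(&io, 0, sizeof(io));
	io.interface_id = 'S';
	io.cmd_len = sizeof(cdb);
	io.cmdp = cdb;
	io.dxfer_direction = SG_DXFER_FROM_DEV;
	io.dxferp = id;
	io.dxfer_len = sizeof(id);
	io.sbp = sense;
	io.mx_sb_len = sizeof(sense);
	io.timeout = 5000;	/* milliseconds */

	if (ioctl(fd, SG_IO, &io) < 0) {
		perror("SG_IO");
		close(fd);
		return 1;
	}
	printf("IDENTIFY DEVICE word 0: 0x%02x%02x\n", id[1], id[0]);
	close(fd);
	return 0;
}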
3293 * ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim
3299 * Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted
3316 struct scsi_device *sdp = cmd->device; in ata_format_dsm_trim_descr()
3317 size_t len = sdp->sector_size; in ata_format_dsm_trim_descr()
3337 count -= 0xffff; in ata_format_dsm_trim_descr()
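Each DSM TRIM LBA range entry is 8 bytes, little-endian, holding a 48-bit starting LBA and a 16-bit sector count, so a single entry covers at most 0xffff sectors; that is why the conversion above keeps subtracting 0xffff from the remaining count. A hedged sketch of packing an extent into such entries (helper name illustrative):

/*
 * Hedged sketch: pack a (lba, count) extent into DSM TRIM range entries.
 * Each entry is 8 bytes, little-endian: a 48-bit LBA in the low bits and
 * a 16-bit sector count in the high bits.
 */
#include <stdint.h>
#include <string.h>
#include <stddef.h>

/* Returns the number of 8-byte entries written, or 0 if buf is too small */
static size_t fill_trim_range(uint8_t *buf, size_t bufsize,
			      uint64_t lba, uint64_t count)
{
	size_t n = 0;

	while (count) {
		uint16_t chunk = count > 0xffff ? 0xffff : (uint16_t)count;
		uint64_t entry = (lba & 0xffffffffffffULL) |
				 ((uint64_t)chunk << 48);
		uint8_t le[8];
		int i;

		if ((n + 1) * 8 > bufsize)
			return 0;
		for (i = 0; i < 8; i++)
			le[i] = (entry >> (8 * i)) & 0xff;	/* little-endian */
		memcpy(buf + n * 8, le, 8);

		lba += chunk;
		count -= chunk;
		n++;
	}
	return n;
}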
3347 * ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same
3354 * - When set translate to DSM TRIM
3355 * - When clear translate to SCT Write Same
3359 struct ata_taskfile *tf = &qc->tf; in ata_scsi_write_same_xlat()
3360 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_write_same_xlat()
3361 struct scsi_device *sdp = scmd->device; in ata_scsi_write_same_xlat()
3362 size_t len = sdp->sector_size; in ata_scsi_write_same_xlat()
3363 struct ata_device *dev = qc->dev; in ata_scsi_write_same_xlat()
3364 const u8 *cdb = scmd->cmnd; in ata_scsi_write_same_xlat()
3385 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_write_same_xlat()
3391 if (!unmap || (dev->quirks & ATA_QUIRK_NOTRIM) || in ata_scsi_write_same_xlat()
3392 !ata_id_has_trim(dev->id)) { in ata_scsi_write_same_xlat()
3412 * For DATA SET MANAGEMENT TRIM in ACS-2 nsect (aka count) in ata_scsi_write_same_xlat()
3422 tf->protocol = ATA_PROT_NCQ; in ata_scsi_write_same_xlat()
3423 tf->command = ATA_CMD_FPDMA_SEND; in ata_scsi_write_same_xlat()
3424 tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f; in ata_scsi_write_same_xlat()
3425 tf->nsect = qc->hw_tag << 3; in ata_scsi_write_same_xlat()
3426 tf->hob_feature = (size / 512) >> 8; in ata_scsi_write_same_xlat()
3427 tf->feature = size / 512; in ata_scsi_write_same_xlat()
3429 tf->auxiliary = 1; in ata_scsi_write_same_xlat()
3431 tf->protocol = ATA_PROT_DMA; in ata_scsi_write_same_xlat()
3432 tf->hob_feature = 0; in ata_scsi_write_same_xlat()
3433 tf->feature = ATA_DSM_TRIM; in ata_scsi_write_same_xlat()
3434 tf->hob_nsect = (size / 512) >> 8; in ata_scsi_write_same_xlat()
3435 tf->nsect = size / 512; in ata_scsi_write_same_xlat()
3436 tf->command = ATA_CMD_DSM; in ata_scsi_write_same_xlat()
3439 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | in ata_scsi_write_same_xlat()
3460 * ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
3473 u8 *cdb = cmd->cmnd; in ata_scsiop_maint_in()
3518 if (dev->flags & ATA_DFLAG_CDL) { in ata_scsiop_maint_in()
3529 if (dev->flags & ATA_DFLAG_CDL) { in ata_scsiop_maint_in()
3540 if (ata_id_zoned_cap(dev->id) || in ata_scsiop_maint_in()
3541 dev->class == ATA_DEV_ZAC) in ata_scsiop_maint_in()
3546 if (dev->flags & ATA_DFLAG_TRUSTED) in ata_scsiop_maint_in()
3561 * ata_scsi_report_zones_complete - convert ATA output
3564 * Convert T-13 little-endian field representation into
3565 * T-10 big-endian field representation.
3570 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_report_zones_complete()
3633 struct ata_taskfile *tf = &qc->tf; in ata_scsi_zbc_in_xlat()
3634 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_zbc_in_xlat()
3635 const u8 *cdb = scmd->cmnd; in ata_scsi_zbc_in_xlat()
3636 u16 sect, fp = (u16)-1; in ata_scsi_zbc_in_xlat()
3641 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_zbc_in_xlat()
3642 ata_dev_warn(qc->dev, "invalid cdb length %d\n", in ata_scsi_zbc_in_xlat()
3643 scmd->cmd_len); in ata_scsi_zbc_in_xlat()
3649 ata_dev_warn(qc->dev, "non-matching transfer count (%d/%d)\n", in ata_scsi_zbc_in_xlat()
3655 ata_dev_warn(qc->dev, "invalid service action %d\n", sa); in ata_scsi_zbc_in_xlat()
3664 ata_dev_warn(qc->dev, "invalid transfer count %d\n", n_block); in ata_scsi_zbc_in_xlat()
3670 if (ata_ncq_enabled(qc->dev) && in ata_scsi_zbc_in_xlat()
3671 ata_fpdma_zac_mgmt_in_supported(qc->dev)) { in ata_scsi_zbc_in_xlat()
3672 tf->protocol = ATA_PROT_NCQ; in ata_scsi_zbc_in_xlat()
3673 tf->command = ATA_CMD_FPDMA_RECV; in ata_scsi_zbc_in_xlat()
3674 tf->hob_nsect = ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN & 0x1f; in ata_scsi_zbc_in_xlat()
3675 tf->nsect = qc->hw_tag << 3; in ata_scsi_zbc_in_xlat()
3676 tf->feature = sect & 0xff; in ata_scsi_zbc_in_xlat()
3677 tf->hob_feature = (sect >> 8) & 0xff; in ata_scsi_zbc_in_xlat()
3678 tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES | (options << 8); in ata_scsi_zbc_in_xlat()
3680 tf->command = ATA_CMD_ZAC_MGMT_IN; in ata_scsi_zbc_in_xlat()
3681 tf->feature = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES; in ata_scsi_zbc_in_xlat()
3682 tf->protocol = ATA_PROT_DMA; in ata_scsi_zbc_in_xlat()
3683 tf->hob_feature = options; in ata_scsi_zbc_in_xlat()
3684 tf->hob_nsect = (sect >> 8) & 0xff; in ata_scsi_zbc_in_xlat()
3685 tf->nsect = sect & 0xff; in ata_scsi_zbc_in_xlat()
3687 tf->device = ATA_LBA; in ata_scsi_zbc_in_xlat()
3688 tf->lbah = (block >> 16) & 0xff; in ata_scsi_zbc_in_xlat()
3689 tf->lbam = (block >> 8) & 0xff; in ata_scsi_zbc_in_xlat()
3690 tf->lbal = block & 0xff; in ata_scsi_zbc_in_xlat()
3691 tf->hob_lbah = (block >> 40) & 0xff; in ata_scsi_zbc_in_xlat()
3692 tf->hob_lbam = (block >> 32) & 0xff; in ata_scsi_zbc_in_xlat()
3693 tf->hob_lbal = (block >> 24) & 0xff; in ata_scsi_zbc_in_xlat()
3695 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48; in ata_scsi_zbc_in_xlat()
3696 qc->flags |= ATA_QCFLAG_RESULT_TF; in ata_scsi_zbc_in_xlat()
3700 qc->complete_fn = ata_scsi_report_zones_complete; in ata_scsi_zbc_in_xlat()
3705 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); in ata_scsi_zbc_in_xlat()
3710 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); in ata_scsi_zbc_in_xlat()
3716 struct ata_taskfile *tf = &qc->tf; in ata_scsi_zbc_out_xlat()
3717 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_zbc_out_xlat()
3718 struct ata_device *dev = qc->dev; in ata_scsi_zbc_out_xlat()
3719 const u8 *cdb = scmd->cmnd; in ata_scsi_zbc_out_xlat()
3723 u16 fp = (u16)-1; in ata_scsi_zbc_out_xlat()
3725 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_zbc_out_xlat()
3751 } else if (block >= dev->n_sectors) { in ata_scsi_zbc_out_xlat()
3759 if (ata_ncq_enabled(qc->dev) && in ata_scsi_zbc_out_xlat()
3760 ata_fpdma_zac_mgmt_out_supported(qc->dev)) { in ata_scsi_zbc_out_xlat()
3761 tf->protocol = ATA_PROT_NCQ_NODATA; in ata_scsi_zbc_out_xlat()
3762 tf->command = ATA_CMD_NCQ_NON_DATA; in ata_scsi_zbc_out_xlat()
3763 tf->feature = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT; in ata_scsi_zbc_out_xlat()
3764 tf->nsect = qc->hw_tag << 3; in ata_scsi_zbc_out_xlat()
3765 tf->auxiliary = sa | ((u16)all << 8); in ata_scsi_zbc_out_xlat()
3767 tf->protocol = ATA_PROT_NODATA; in ata_scsi_zbc_out_xlat()
3768 tf->command = ATA_CMD_ZAC_MGMT_OUT; in ata_scsi_zbc_out_xlat()
3769 tf->feature = sa; in ata_scsi_zbc_out_xlat()
3770 tf->hob_feature = all; in ata_scsi_zbc_out_xlat()
3772 tf->lbah = (block >> 16) & 0xff; in ata_scsi_zbc_out_xlat()
3773 tf->lbam = (block >> 8) & 0xff; in ata_scsi_zbc_out_xlat()
3774 tf->lbal = block & 0xff; in ata_scsi_zbc_out_xlat()
3775 tf->hob_lbah = (block >> 40) & 0xff; in ata_scsi_zbc_out_xlat()
3776 tf->hob_lbam = (block >> 32) & 0xff; in ata_scsi_zbc_out_xlat()
3777 tf->hob_lbal = (block >> 24) & 0xff; in ata_scsi_zbc_out_xlat()
3778 tf->device = ATA_LBA; in ata_scsi_zbc_out_xlat()
3779 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48; in ata_scsi_zbc_out_xlat()
3784 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); in ata_scsi_zbc_out_xlat()
3788 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); in ata_scsi_zbc_out_xlat()
3793 * ata_mselect_caching - Simulate MODE SELECT for caching info page
3807 struct ata_taskfile *tf = &qc->tf; in ata_mselect_caching()
3808 struct ata_device *dev = qc->dev; in ata_mselect_caching()
3818 if (len != CACHE_MPAGE_LEN - 2) { in ata_mselect_caching()
3819 *fp = min(len, CACHE_MPAGE_LEN - 2); in ata_mselect_caching()
3820 return -EINVAL; in ata_mselect_caching()
3826 * Check that read-only bits are not modified. in ata_mselect_caching()
3828 ata_msense_caching(dev->id, mpage, false); in ata_mselect_caching()
3829 for (i = 0; i < CACHE_MPAGE_LEN - 2; i++) { in ata_mselect_caching()
3834 return -EINVAL; in ata_mselect_caching()
3838 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; in ata_mselect_caching()
3839 tf->protocol = ATA_PROT_NODATA; in ata_mselect_caching()
3840 tf->nsect = 0; in ata_mselect_caching()
3841 tf->command = ATA_CMD_SET_FEATURES; in ata_mselect_caching()
3842 tf->feature = wce ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF; in ata_mselect_caching()
3847 * Simulate MODE SELECT control mode page, sub-page 0.
3852 struct ata_device *dev = qc->dev; in ata_mselect_control_spg0()
3862 if (len != CONTROL_MPAGE_LEN - 2) { in ata_mselect_control_spg0()
3863 *fp = min(len, CONTROL_MPAGE_LEN - 2); in ata_mselect_control_spg0()
3864 return -EINVAL; in ata_mselect_control_spg0()
3870 * Check that read-only bits are not modified. in ata_mselect_control_spg0()
3873 for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) { in ata_mselect_control_spg0()
3878 return -EINVAL; in ata_mselect_control_spg0()
3882 dev->flags |= ATA_DFLAG_D_SENSE; in ata_mselect_control_spg0()
3884 dev->flags &= ~ATA_DFLAG_D_SENSE; in ata_mselect_control_spg0()
3889 * Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode
3896 struct ata_device *dev = qc->dev; in ata_mselect_control_ata_feature()
3897 struct ata_taskfile *tf = &qc->tf; in ata_mselect_control_ata_feature()
3904 if (len != ATA_FEATURE_SUB_MPAGE_LEN - 4) { in ata_mselect_control_ata_feature()
3905 *fp = min(len, ATA_FEATURE_SUB_MPAGE_LEN - 4); in ata_mselect_control_ata_feature()
3906 return -EINVAL; in ata_mselect_control_ata_feature()
3914 dev->flags &= ~ATA_DFLAG_CDL_ENABLED; in ata_mselect_control_ata_feature()
3918 if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) { in ata_mselect_control_ata_feature()
3921 return -EINVAL; in ata_mselect_control_ata_feature()
3924 dev->flags |= ATA_DFLAG_CDL_ENABLED; in ata_mselect_control_ata_feature()
3928 return -EINVAL; in ata_mselect_control_ata_feature()
3931 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; in ata_mselect_control_ata_feature()
3932 tf->protocol = ATA_PROT_NODATA; in ata_mselect_control_ata_feature()
3933 tf->command = ATA_CMD_SET_FEATURES; in ata_mselect_control_ata_feature()
3934 tf->feature = SETFEATURES_CDL; in ata_mselect_control_ata_feature()
3935 tf->nsect = cdl_action; in ata_mselect_control_ata_feature()
3941 * ata_mselect_control - Simulate MODE SELECT for control page
3943 * @spg: target sub-page of the control page
3962 return -EINVAL; in ata_mselect_control()
3967 * ata_scsi_mode_select_xlat - Simulate MODE SELECT 6, 10 commands
3970 * Converts a MODE SELECT command to an ATA SET FEATURES taskfile.
3979 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_mode_select_xlat()
3980 const u8 *cdb = scmd->cmnd; in ata_scsi_mode_select_xlat()
3984 u16 fp = (u16)-1; in ata_scsi_mode_select_xlat()
3991 if (scmd->cmd_len < 5) { in ata_scsi_mode_select_xlat()
3999 if (scmd->cmd_len < 9) { in ata_scsi_mode_select_xlat()
4016 if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len) in ata_scsi_mode_select_xlat()
4032 len -= hdr_len; in ata_scsi_mode_select_xlat()
4042 len -= bd_len; in ata_scsi_mode_select_xlat()
4056 len -= 4; in ata_scsi_mode_select_xlat()
4064 len -= 2; in ata_scsi_mode_select_xlat()
4068 * Supported subpages: all subpages and ATA feature sub-page f2h of in ata_scsi_mode_select_xlat()
4082 if (qc->dev->flags & ATA_DFLAG_CDL && in ata_scsi_mode_select_xlat()
4127 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); in ata_scsi_mode_select_xlat()
4131 ata_scsi_set_invalid_parameter(qc->dev, scmd, fp); in ata_scsi_mode_select_xlat()
4136 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); in ata_scsi_mode_select_xlat()
4140 scmd->result = SAM_STAT_GOOD; in ata_scsi_mode_select_xlat()
4156 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_security_inout_xlat()
4157 const u8 *cdb = scmd->cmnd; in ata_scsi_security_inout_xlat()
4158 struct ata_taskfile *tf = &qc->tf; in ata_scsi_security_inout_xlat()
4163 bool dma = !(qc->dev->flags & ATA_DFLAG_PIO); in ata_scsi_security_inout_xlat()
4169 ata_scsi_set_invalid_field(qc->dev, scmd, 1, 0); in ata_scsi_security_inout_xlat()
4175 ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0); in ata_scsi_security_inout_xlat()
4180 ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0); in ata_scsi_security_inout_xlat()
4184 /* convert to the sector-based ATA addressing */ in ata_scsi_security_inout_xlat()
4188 tf->protocol = dma ? ATA_PROT_DMA : ATA_PROT_PIO; in ata_scsi_security_inout_xlat()
4189 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR | ATA_TFLAG_LBA; in ata_scsi_security_inout_xlat()
4191 tf->flags |= ATA_TFLAG_WRITE; in ata_scsi_security_inout_xlat()
4192 tf->command = ata_scsi_trusted_op(len, send, dma); in ata_scsi_security_inout_xlat()
4193 tf->feature = secp; in ata_scsi_security_inout_xlat()
4194 tf->lbam = spsp & 0xff; in ata_scsi_security_inout_xlat()
4195 tf->lbah = spsp >> 8; in ata_scsi_security_inout_xlat()
4198 tf->nsect = len & 0xff; in ata_scsi_security_inout_xlat()
4199 tf->lbal = len >> 8; in ata_scsi_security_inout_xlat()
4202 tf->lbah = (1 << 7); in ata_scsi_security_inout_xlat()
4210 * ata_scsi_var_len_cdb_xlat - SATL variable length CDB to Handler
4217 * Zero on success, non-zero on failure
4222 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_var_len_cdb_xlat()
4223 const u8 *cdb = scmd->cmnd; in ata_scsi_var_len_cdb_xlat()
4227 * if service action represents an ata pass-thru(32) command, in ata_scsi_var_len_cdb_xlat()
4238 * ata_get_xlat_func - check if SCSI to ATA translation is possible
4293 if (!(dev->flags & ATA_DFLAG_TRUSTED)) in ata_get_xlat_func()
4306 struct ata_port *ap = dev->link->ap; in __ata_scsi_queuecmd()
4307 u8 scsi_op = scmd->cmnd[0]; in __ata_scsi_queuecmd()
4312 * However, this check is done without holding the ap->lock (a libata in __ata_scsi_queuecmd()
4314 * therefore we must check if EH is pending, while holding ap->lock. in __ata_scsi_queuecmd()
4316 if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) in __ata_scsi_queuecmd()
4319 if (unlikely(!scmd->cmd_len)) in __ata_scsi_queuecmd()
4322 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { in __ata_scsi_queuecmd()
4323 if (unlikely(scmd->cmd_len > dev->cdb_len)) in __ata_scsi_queuecmd()
4331 if (unlikely(len > scmd->cmd_len || in __ata_scsi_queuecmd()
4332 len > dev->cdb_len || in __ata_scsi_queuecmd()
4333 scmd->cmd_len > ATAPI_CDB_LEN)) in __ata_scsi_queuecmd()
4339 if (unlikely(scmd->cmd_len > 16)) in __ata_scsi_queuecmd()
4353 scmd->result = DID_ERROR << 16; in __ata_scsi_queuecmd()
4359 * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
4381 struct scsi_device *scsidev = cmd->device; in ata_scsi_queuecmd()
4387 spin_lock_irqsave(ap->lock, irq_flags); in ata_scsi_queuecmd()
4393 cmd->result = (DID_BAD_TARGET << 16); in ata_scsi_queuecmd()
4397 spin_unlock_irqrestore(ap->lock, irq_flags); in ata_scsi_queuecmd()
4404 * ata_scsi_simulate - simulate SCSI command on ATA device
4408 * Interprets and directly executes a select list of SCSI commands
4417 const u8 *scsicmd = cmd->cmnd; in ata_scsi_simulate()
4444 * turning this into a no-op. in ata_scsi_simulate()
4450 /* no-op's, complete with success */ in ata_scsi_simulate()
4481 for (i = 0; i < host->n_ports; i++) { in ata_scsi_add_hosts()
4482 struct ata_port *ap = host->ports[i]; in ata_scsi_add_hosts()
4485 rc = -ENOMEM; in ata_scsi_add_hosts()
4490 shost->eh_noresume = 1; in ata_scsi_add_hosts()
4491 *(struct ata_port **)&shost->hostdata[0] = ap; in ata_scsi_add_hosts()
4492 ap->scsi_host = shost; in ata_scsi_add_hosts()
4494 shost->transportt = ata_scsi_transport_template; in ata_scsi_add_hosts()
4495 shost->unique_id = ap->print_id; in ata_scsi_add_hosts()
4496 shost->max_id = 16; in ata_scsi_add_hosts()
4497 shost->max_lun = 1; in ata_scsi_add_hosts()
4498 shost->max_channel = 1; in ata_scsi_add_hosts()
4499 shost->max_cmd_len = 32; in ata_scsi_add_hosts()
4501 /* Schedule policy is determined by ->qc_defer() in ata_scsi_add_hosts()
4506 shost->max_host_blocked = 1; in ata_scsi_add_hosts()
4508 rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev); in ata_scsi_add_hosts()
4516 while (--i >= 0) { in ata_scsi_add_hosts()
4517 struct Scsi_Host *shost = host->ports[i]->scsi_host; in ata_scsi_add_hosts()
4528 struct scsi_device *sdev = dev->sdev; in ata_scsi_assign_ofnode()
4529 struct device *d = ap->host->dev; in ata_scsi_assign_ofnode()
4530 struct device_node *np = d->of_node; in ata_scsi_assign_ofnode()
4540 if (val == dev->devno) { in ata_scsi_assign_ofnode()
4542 sdev->sdev_gendev.of_node = child; in ata_scsi_assign_ofnode()
4566 if (dev->sdev) in ata_scsi_scan_host()
4570 id = dev->devno; in ata_scsi_scan_host()
4572 channel = link->pmp; in ata_scsi_scan_host()
4574 sdev = __scsi_add_device(ap->scsi_host, channel, id, 0, in ata_scsi_scan_host()
4577 dev->sdev = sdev; in ata_scsi_scan_host()
4581 dev->sdev = NULL; in ata_scsi_scan_host()
4592 if (!dev->sdev) in ata_scsi_scan_host()
4614 if (--tries) { in ata_scsi_scan_host()
4623 queue_delayed_work(system_long_wq, &ap->hotplug_task, in ata_scsi_scan_host()
4628 * ata_scsi_offline_dev - offline attached SCSI device
4633 * function is called with host lock which protects dev->sdev
4644 if (dev->sdev) { in ata_scsi_offline_dev()
4645 scsi_device_set_state(dev->sdev, SDEV_OFFLINE); in ata_scsi_offline_dev()
4652 * ata_scsi_remove_dev - remove attached SCSI device
4663 struct ata_port *ap = dev->link->ap; in ata_scsi_remove_dev()
4673 mutex_lock(&ap->scsi_host->scan_mutex); in ata_scsi_remove_dev()
4674 spin_lock_irqsave(ap->lock, flags); in ata_scsi_remove_dev()
4676 /* clearing dev->sdev is protected by host lock */ in ata_scsi_remove_dev()
4677 sdev = dev->sdev; in ata_scsi_remove_dev()
4678 dev->sdev = NULL; in ata_scsi_remove_dev()
4698 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_remove_dev()
4699 mutex_unlock(&ap->scsi_host->scan_mutex); in ata_scsi_remove_dev()
4703 dev_name(&sdev->sdev_gendev)); in ata_scsi_remove_dev()
4712 struct ata_port *ap = link->ap; in ata_scsi_handle_link_detach()
4718 spin_lock_irqsave(ap->lock, flags); in ata_scsi_handle_link_detach()
4719 if (!(dev->flags & ATA_DFLAG_DETACHED)) { in ata_scsi_handle_link_detach()
4720 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_handle_link_detach()
4724 dev->flags &= ~ATA_DFLAG_DETACHED; in ata_scsi_handle_link_detach()
4725 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_handle_link_detach()
4732 * ata_scsi_media_change_notify - send media change event
4743 if (dev->sdev) in ata_scsi_media_change_notify()
4744 sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE, in ata_scsi_media_change_notify()
4749 * ata_scsi_hotplug - SCSI part of hotplug
4766 if (ap->pflags & ATA_PFLAG_UNLOADING) in ata_scsi_hotplug()
4769 mutex_lock(&ap->scsi_scan_mutex); in ata_scsi_hotplug()
4775 ata_scsi_handle_link_detach(&ap->link); in ata_scsi_hotplug()
4776 if (ap->pmp_link) in ata_scsi_hotplug()
4778 ata_scsi_handle_link_detach(&ap->pmp_link[i]); in ata_scsi_hotplug()
4783 mutex_unlock(&ap->scsi_scan_mutex); in ata_scsi_hotplug()
4787 * ata_scsi_user_scan - indication for user-initiated bus scan
4810 return -EINVAL; in ata_scsi_user_scan()
4814 return -EINVAL; in ata_scsi_user_scan()
4818 return -EINVAL; in ata_scsi_user_scan()
4822 spin_lock_irqsave(ap->lock, flags); in ata_scsi_user_scan()
4828 struct ata_eh_info *ehi = &link->eh_info; in ata_scsi_user_scan()
4829 ehi->probe_mask |= ATA_ALL_DEVICES; in ata_scsi_user_scan()
4830 ehi->action |= ATA_EH_RESET; in ata_scsi_user_scan()
4836 struct ata_eh_info *ehi = &dev->link->eh_info; in ata_scsi_user_scan()
4837 ehi->probe_mask |= 1 << dev->devno; in ata_scsi_user_scan()
4838 ehi->action |= ATA_EH_RESET; in ata_scsi_user_scan()
4840 rc = -EINVAL; in ata_scsi_user_scan()
4845 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_user_scan()
4848 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_user_scan()
4854 * ata_scsi_dev_rescan - initiate scsi_rescan_device()
4873 mutex_lock(&ap->scsi_scan_mutex); in ata_scsi_dev_rescan()
4874 spin_lock_irqsave(ap->lock, flags); in ata_scsi_dev_rescan()
4878 struct scsi_device *sdev = dev->sdev; in ata_scsi_dev_rescan()
4884 if (ap->pflags & ATA_PFLAG_SUSPENDED) in ata_scsi_dev_rescan()
4892 do_resume = dev->flags & ATA_DFLAG_RESUMING; in ata_scsi_dev_rescan()
4894 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_dev_rescan()
4897 if (ret == -EWOULDBLOCK) in ata_scsi_dev_rescan()
4899 dev->flags &= ~ATA_DFLAG_RESUMING; in ata_scsi_dev_rescan()
4903 spin_lock_irqsave(ap->lock, flags); in ata_scsi_dev_rescan()
4911 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_dev_rescan()
4913 mutex_unlock(&ap->scsi_scan_mutex); in ata_scsi_dev_rescan()
4917 schedule_delayed_work(&ap->scsi_rescan_task, in ata_scsi_dev_rescan()