1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * libata-scsi.c - helper library for ATA
5 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
6 * Copyright 2003-2004 Jeff Garzik
9 * as Documentation/driver-api/libata.rst
12 * - http://www.t10.org/
13 * - http://www.t13.org/
38 #include "libata-transport.h"
66 RW_RECOVERY_MPAGE_LEN - 2,
76 CACHE_MPAGE_LEN - 2,
85 CONTROL_MPAGE_LEN - 2,
87 0, /* [QAM+QERR may be 1, see 05-359r1] */
89 0, 30 /* extended self test time, see 05-359r1 */
103 ap = ata_shost_to_port(sdev->host); in ata_scsi_park_show()
105 spin_lock_irq(ap->lock); in ata_scsi_park_show()
108 rc = -ENODEV; in ata_scsi_park_show()
111 if (dev->flags & ATA_DFLAG_NO_UNLOAD) { in ata_scsi_park_show()
112 rc = -EOPNOTSUPP; in ata_scsi_park_show()
116 link = dev->link; in ata_scsi_park_show()
118 if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS && in ata_scsi_park_show()
119 link->eh_context.unloaded_mask & (1 << dev->devno) && in ata_scsi_park_show()
120 time_after(dev->unpark_deadline, now)) in ata_scsi_park_show()
121 msecs = jiffies_to_msecs(dev->unpark_deadline - now); in ata_scsi_park_show()
126 spin_unlock_irq(ap->lock); in ata_scsi_park_show()
145 if (input < -2) in ata_scsi_park_store()
146 return -EINVAL; in ata_scsi_park_store()
148 rc = -EOVERFLOW; in ata_scsi_park_store()
152 ap = ata_shost_to_port(sdev->host); in ata_scsi_park_store()
154 spin_lock_irqsave(ap->lock, flags); in ata_scsi_park_store()
157 rc = -ENODEV; in ata_scsi_park_store()
160 if (dev->class != ATA_DEV_ATA && in ata_scsi_park_store()
161 dev->class != ATA_DEV_ZAC) { in ata_scsi_park_store()
162 rc = -EOPNOTSUPP; in ata_scsi_park_store()
167 if (dev->flags & ATA_DFLAG_NO_UNLOAD) { in ata_scsi_park_store()
168 rc = -EOPNOTSUPP; in ata_scsi_park_store()
172 dev->unpark_deadline = ata_deadline(jiffies, input); in ata_scsi_park_store()
173 dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK; in ata_scsi_park_store()
175 complete(&ap->park_req_pending); in ata_scsi_park_store()
178 case -1: in ata_scsi_park_store()
179 dev->flags &= ~ATA_DFLAG_NO_UNLOAD; in ata_scsi_park_store()
181 case -2: in ata_scsi_park_store()
182 dev->flags |= ATA_DFLAG_NO_UNLOAD; in ata_scsi_park_store()
187 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_park_store()
214 bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE); in ata_scsi_set_sense()
229 scsi_set_sense_information(cmd->sense_buffer, in ata_scsi_set_sense_information()
234 * ata_scsi_set_passthru_sense_fields - Set ATA fields in sense buffer
235 * @qc: ATA PASS-THROUGH command.
245 struct ata_device *dev = qc->dev; in ata_scsi_set_passthru_sense_fields()
246 struct scsi_cmnd *cmd = qc->scsicmd; in ata_scsi_set_passthru_sense_fields()
247 struct ata_taskfile *tf = &qc->result_tf; in ata_scsi_set_passthru_sense_fields()
248 unsigned char *sb = cmd->sense_buffer; in ata_scsi_set_passthru_sense_fields()
250 if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) { in ata_scsi_set_passthru_sense_fields()
275 desc[3] = tf->error; in ata_scsi_set_passthru_sense_fields()
276 desc[5] = tf->nsect; in ata_scsi_set_passthru_sense_fields()
277 desc[7] = tf->lbal; in ata_scsi_set_passthru_sense_fields()
278 desc[9] = tf->lbam; in ata_scsi_set_passthru_sense_fields()
279 desc[11] = tf->lbah; in ata_scsi_set_passthru_sense_fields()
280 desc[12] = tf->device; in ata_scsi_set_passthru_sense_fields()
281 desc[13] = tf->status; in ata_scsi_set_passthru_sense_fields()
287 if (tf->flags & ATA_TFLAG_LBA48) { in ata_scsi_set_passthru_sense_fields()
289 desc[4] = tf->hob_nsect; in ata_scsi_set_passthru_sense_fields()
290 desc[6] = tf->hob_lbal; in ata_scsi_set_passthru_sense_fields()
291 desc[8] = tf->hob_lbam; in ata_scsi_set_passthru_sense_fields()
292 desc[10] = tf->hob_lbah; in ata_scsi_set_passthru_sense_fields()
297 sb[3] = tf->error; in ata_scsi_set_passthru_sense_fields()
298 sb[4] = tf->status; in ata_scsi_set_passthru_sense_fields()
299 sb[5] = tf->device; in ata_scsi_set_passthru_sense_fields()
300 sb[6] = tf->nsect; in ata_scsi_set_passthru_sense_fields()
301 if (tf->flags & ATA_TFLAG_LBA48) { in ata_scsi_set_passthru_sense_fields()
303 if (tf->hob_nsect) in ata_scsi_set_passthru_sense_fields()
305 if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah) in ata_scsi_set_passthru_sense_fields()
308 sb[9] = tf->lbal; in ata_scsi_set_passthru_sense_fields()
309 sb[10] = tf->lbam; in ata_scsi_set_passthru_sense_fields()
310 sb[11] = tf->lbah; in ata_scsi_set_passthru_sense_fields()
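
/*
 * Sense layout, for reference: the descriptor-format branch above fills a SAT
 * "ATA Status Return" sense data descriptor (descriptor code 09h, additional
 * length 0Ch) in which byte 2 bit 0 carries EXTEND, byte 3 ERROR, bytes 4-5
 * COUNT, bytes 6-11 the LBA low/mid/high register pairs (HOB byte first),
 * byte 12 DEVICE and byte 13 STATUS - exactly the desc[] indices written
 * above. The fixed-format branch packs the same registers into sense bytes
 * 3-11 as shown for sb[].
 */
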
319 scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, in ata_scsi_set_invalid_field()
328 scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, in ata_scsi_set_invalid_parameter()
348 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
378 * ata_scsi_unlock_native_capacity - unlock native capacity
389 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_unlock_native_capacity()
393 spin_lock_irqsave(ap->lock, flags); in ata_scsi_unlock_native_capacity()
396 if (dev && dev->n_sectors < dev->n_native_sectors) { in ata_scsi_unlock_native_capacity()
397 dev->flags |= ATA_DFLAG_UNLOCK_HPA; in ata_scsi_unlock_native_capacity()
398 dev->link->eh_info.action |= ATA_EH_RESET; in ata_scsi_unlock_native_capacity()
402 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_unlock_native_capacity()
408 * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
427 return -ENOMSG; in ata_get_identity()
429 if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16))) in ata_get_identity()
430 return -EFAULT; in ata_get_identity()
432 ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN); in ata_get_identity()
434 return -EFAULT; in ata_get_identity()
436 ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN); in ata_get_identity()
438 return -EFAULT; in ata_get_identity()
440 ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN); in ata_get_identity()
442 return -EFAULT; in ata_get_identity()
448 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
474 return -EINVAL; in ata_cmd_ioctl()
477 return -EFAULT; in ata_cmd_ioctl()
486 rc = -ENOMEM; in ata_cmd_ioctl()
490 scsi_cmd[1] = (4 << 1); /* PIO Data-in */ in ata_cmd_ioctl()
494 scsi_cmd[1] = (3 << 1); /* Non-data */ in ata_cmd_ioctl()
501 if (args[0] == ATA_CMD_SMART) { /* hack -- ide driver does this too */ in ata_cmd_ioctl()
522 /* If we set cc then ATA pass-through will cause a in ata_cmd_ioctl()
537 rc = -EFAULT; in ata_cmd_ioctl()
543 rc = -EIO; in ata_cmd_ioctl()
549 rc = -EFAULT; in ata_cmd_ioctl()
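
/*
 * For reference, the HDIO_DRIVE_CMD path above builds the same ATA
 * PASS-THROUGH (16) CDB that user space can issue directly through SG_IO.
 * The sketch below is illustrative only (not part of libata-scsi.c): it
 * issues IDENTIFY DEVICE (0xEC) as a PIO Data-In transfer of one 512-byte
 * sector, mirroring the "scsi_cmd[1] = (4 << 1)" protocol encoding shown
 * above. The fd is assumed to be an open /dev/sdX node.
 */
#include <sys/ioctl.h>
#include <scsi/sg.h>

static int example_identify_device(int fd, unsigned char *buf /* 512 bytes */)
{
	unsigned char cdb[16] = { 0 };
	unsigned char sense[32] = { 0 };
	struct sg_io_hdr io = { 0 };

	cdb[0] = 0x85;		/* ATA PASS-THROUGH (16) */
	cdb[1] = 4 << 1;	/* protocol: PIO Data-In */
	cdb[2] = 0x0e;		/* T_DIR=1, BYT_BLOK=1, T_LENGTH=sector count */
	cdb[6] = 1;		/* one sector */
	cdb[14] = 0xec;		/* IDENTIFY DEVICE */

	io.interface_id = 'S';
	io.cmd_len = sizeof(cdb);
	io.cmdp = cdb;
	io.dxfer_direction = SG_DXFER_FROM_DEV;
	io.dxfer_len = 512;
	io.dxferp = buf;
	io.mx_sb_len = sizeof(sense);
	io.sbp = sense;
	io.timeout = 10000;	/* milliseconds */

	return ioctl(fd, SG_IO, &io);
}
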
556 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
581 return -EINVAL; in ata_task_ioctl()
584 return -EFAULT; in ata_task_ioctl()
589 scsi_cmd[1] = (3 << 1); /* Non-data */ in ata_task_ioctl()
610 /* If we set cc then ATA pass-through will cause a in ata_task_ioctl()
629 rc = -EFAULT; in ata_task_ioctl()
634 rc = -EIO; in ata_task_ioctl()
644 if (ap->flags & ATA_FLAG_PIO_DMA) in ata_ioc32()
646 if (ap->pflags & ATA_PFLAG_PIO32) in ata_ioc32()
659 int rc = -EINVAL; in ata_sas_scsi_ioctl()
664 spin_lock_irqsave(ap->lock, flags); in ata_sas_scsi_ioctl()
666 spin_unlock_irqrestore(ap->lock, flags); in ata_sas_scsi_ioctl()
676 spin_lock_irqsave(ap->lock, flags); in ata_sas_scsi_ioctl()
677 if (ap->pflags & ATA_PFLAG_PIO32CHANGE) { in ata_sas_scsi_ioctl()
679 ap->pflags |= ATA_PFLAG_PIO32; in ata_sas_scsi_ioctl()
681 ap->pflags &= ~ATA_PFLAG_PIO32; in ata_sas_scsi_ioctl()
684 rc = -EINVAL; in ata_sas_scsi_ioctl()
686 spin_unlock_irqrestore(ap->lock, flags); in ata_sas_scsi_ioctl()
694 return -EACCES; in ata_sas_scsi_ioctl()
699 return -EACCES; in ata_sas_scsi_ioctl()
703 rc = -ENOTTY; in ata_sas_scsi_ioctl()
714 return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host), in ata_scsi_ioctl()
720 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
728 * If a command was available, fill in the SCSI-specific
741 struct ata_port *ap = dev->link->ap; in ata_scsi_qc_new()
748 if (ap->flags & ATA_FLAG_SAS_HOST) { in ata_scsi_qc_new()
751 * unique per-device budget token as a tag. in ata_scsi_qc_new()
753 if (WARN_ON_ONCE(cmd->budget_token >= ATA_MAX_QUEUE)) in ata_scsi_qc_new()
755 tag = cmd->budget_token; in ata_scsi_qc_new()
757 tag = scsi_cmd_to_rq(cmd)->tag; in ata_scsi_qc_new()
761 qc->tag = qc->hw_tag = tag; in ata_scsi_qc_new()
762 qc->ap = ap; in ata_scsi_qc_new()
763 qc->dev = dev; in ata_scsi_qc_new()
767 qc->scsicmd = cmd; in ata_scsi_qc_new()
768 qc->scsidone = scsi_done; in ata_scsi_qc_new()
770 qc->sg = scsi_sglist(cmd); in ata_scsi_qc_new()
771 qc->n_elem = scsi_sg_count(cmd); in ata_scsi_qc_new()
773 if (scsi_cmd_to_rq(cmd)->rq_flags & RQF_QUIET) in ata_scsi_qc_new()
774 qc->flags |= ATA_QCFLAG_QUIET; in ata_scsi_qc_new()
787 struct scsi_cmnd *scmd = qc->scsicmd; in ata_qc_set_pc_nbytes()
789 qc->extrabytes = scmd->extra_len; in ata_qc_set_pc_nbytes()
790 qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes; in ata_qc_set_pc_nbytes()
794 * ata_to_sense_error - convert ATA error to SCSI error
836 /* TRK0 - Track 0 not found */ in ata_to_sense_error()
843 /* SRV/IDNF - ID not found */ in ata_to_sense_error()
846 /* MC - Media Changed */ in ata_to_sense_error()
849 /* ECC - Uncorrectable ECC error */ in ata_to_sense_error()
852 /* BBD - block marked bad */ in ata_to_sense_error()
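
/*
 * ata_to_sense_error() is table driven: the ERROR register bits named in the
 * comments above are scanned in priority order and mapped to a SCSI
 * (sense key, asc, ascq) triple. A minimal sketch of that shape follows; the
 * entries are illustrative examples of typical SAT mappings, not a copy of
 * the in-kernel table.
 */
struct example_sense_map {
	unsigned char err_bit;		/* bit in the ATA ERROR register */
	unsigned char sk, asc, ascq;	/* SCSI sense triple to report */
};

static const struct example_sense_map example_sense_table[] = {
	{ 0x40 /* UNC  */, 0x03 /* MEDIUM ERROR    */, 0x11, 0x04 },
	{ 0x20 /* MC   */, 0x06 /* UNIT ATTENTION  */, 0x28, 0x00 },
	{ 0x10 /* IDNF */, 0x05 /* ILLEGAL REQUEST */, 0x21, 0x00 },
	{ 0x04 /* ABRT */, 0x0b /* ABORTED COMMAND */, 0x00, 0x00 },
};

static void example_ata_to_sense(unsigned char err, unsigned char *sk,
				 unsigned char *asc, unsigned char *ascq)
{
	unsigned int i;

	/* First matching error bit wins; otherwise report ABORTED COMMAND. */
	for (i = 0; i < sizeof(example_sense_table) / sizeof(example_sense_table[0]); i++) {
		if (err & example_sense_table[i].err_bit) {
			*sk = example_sense_table[i].sk;
			*asc = example_sense_table[i].asc;
			*ascq = example_sense_table[i].ascq;
			return;
		}
	}
	*sk = 0x0b;
	*asc = 0x00;
	*ascq = 0x00;
}
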
917 * ata_gen_passthru_sense - Generate check condition sense block.
925 * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
933 struct ata_device *dev = qc->dev; in ata_gen_passthru_sense()
934 struct scsi_cmnd *cmd = qc->scsicmd; in ata_gen_passthru_sense()
935 struct ata_taskfile *tf = &qc->result_tf; in ata_gen_passthru_sense()
938 if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) { in ata_gen_passthru_sense()
948 if (qc->err_mask || in ata_gen_passthru_sense()
949 tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { in ata_gen_passthru_sense()
950 ata_to_sense_error(tf->status, tf->error, in ata_gen_passthru_sense()
952 ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq); in ata_gen_passthru_sense()
955 * ATA PASS-THROUGH INFORMATION AVAILABLE in ata_gen_passthru_sense()
971 * ata_gen_ata_sense - generate a SCSI fixed sense block
982 struct ata_device *dev = qc->dev; in ata_gen_ata_sense()
983 struct scsi_cmnd *cmd = qc->scsicmd; in ata_gen_ata_sense()
984 struct ata_taskfile *tf = &qc->result_tf; in ata_gen_ata_sense()
985 unsigned char *sb = cmd->sense_buffer; in ata_gen_ata_sense()
996 if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) { in ata_gen_ata_sense()
1005 if (qc->err_mask || in ata_gen_ata_sense()
1006 tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) { in ata_gen_ata_sense()
1007 ata_to_sense_error(tf->status, tf->error, in ata_gen_ata_sense()
1013 tf->status, qc->err_mask); in ata_gen_ata_sense()
1018 block = ata_tf_read_block(&qc->result_tf, dev); in ata_gen_ata_sense()
1027 sdev->use_10_for_rw = 1; in ata_scsi_sdev_config()
1028 sdev->use_10_for_ms = 1; in ata_scsi_sdev_config()
1029 sdev->no_write_same = 1; in ata_scsi_sdev_config()
1031 /* Schedule policy is determined by ->qc_defer() callback and in ata_scsi_sdev_config()
1036 sdev->max_device_blocked = 1; in ata_scsi_sdev_config()
1040 * ata_scsi_dma_need_drain - Check whether data transfer may overflow
1058 return atapi_cmd_type(scmd->cmnd[0]) == ATAPI_MISC; in ata_scsi_dma_need_drain()
1067 if (!ata_id_has_unload(dev->id)) in ata_scsi_dev_config()
1068 dev->flags |= ATA_DFLAG_NO_UNLOAD; in ata_scsi_dev_config()
1071 dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors); in ata_scsi_dev_config()
1072 lim->max_hw_sectors = dev->max_sectors; in ata_scsi_dev_config()
1074 if (dev->class == ATA_DEV_ATAPI) { in ata_scsi_dev_config()
1075 sdev->sector_size = ATA_SECT_SIZE; in ata_scsi_dev_config()
1078 lim->dma_pad_mask = ATA_DMA_PAD_SZ - 1; in ata_scsi_dev_config()
1081 lim->max_segments--; in ata_scsi_dev_config()
1083 sdev->dma_drain_len = ATAPI_MAX_DRAIN; in ata_scsi_dev_config()
1084 sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO); in ata_scsi_dev_config()
1085 if (!sdev->dma_drain_buf) { in ata_scsi_dev_config()
1087 return -ENOMEM; in ata_scsi_dev_config()
1090 sdev->sector_size = ata_id_logical_sector_size(dev->id); in ata_scsi_dev_config()
1095 * devices' power state is handled directly by libata EH. in ata_scsi_dev_config()
1098 * to be resumed to correctly reflect the power state of the in ata_scsi_dev_config()
1101 sdev->manage_runtime_start_stop = 1; in ata_scsi_dev_config()
1102 sdev->manage_shutdown = 1; in ata_scsi_dev_config()
1103 sdev->force_runtime_start_on_system_start = 1; in ata_scsi_dev_config()
1113 if (sdev->sector_size > PAGE_SIZE) in ata_scsi_dev_config()
1116 sdev->sector_size); in ata_scsi_dev_config()
1118 lim->dma_alignment = sdev->sector_size - 1; in ata_scsi_dev_config()
1120 if (dev->flags & ATA_DFLAG_AN) in ata_scsi_dev_config()
1121 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); in ata_scsi_dev_config()
1124 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id)); in ata_scsi_dev_config()
1128 if (dev->flags & ATA_DFLAG_TRUSTED) in ata_scsi_dev_config()
1129 sdev->security_supported = 1; in ata_scsi_dev_config()
1131 dev->sdev = sdev; in ata_scsi_dev_config()
1136 * ata_scsi_slave_alloc - Early setup of SCSI device
1148 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_slave_alloc()
1158 link = device_link_add(&sdev->sdev_gendev, &ap->tdev, in ata_scsi_slave_alloc()
1163 dev_name(&sdev->sdev_gendev)); in ata_scsi_slave_alloc()
1164 return -ENODEV; in ata_scsi_slave_alloc()
1172 * ata_scsi_device_configure - Set SCSI device attributes
1178 * SCSI mid-layer behaviors.
1187 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_device_configure()
1198 * ata_scsi_slave_destroy - SCSI device is about to be destroyed
1203 * dev->sdev, this function doesn't have to do anything.
1204 * Otherwise, SCSI layer initiated warm-unplug is in progress.
1205 * Clear dev->sdev, schedule the device for ATA detach and invoke
1213 struct ata_port *ap = ata_shost_to_port(sdev->host); in ata_scsi_slave_destroy()
1217 device_link_remove(&sdev->sdev_gendev, &ap->tdev); in ata_scsi_slave_destroy()
1219 spin_lock_irqsave(ap->lock, flags); in ata_scsi_slave_destroy()
1221 if (dev && dev->sdev) { in ata_scsi_slave_destroy()
1222 /* SCSI device already in CANCEL state, no need to offline it */ in ata_scsi_slave_destroy()
1223 dev->sdev = NULL; in ata_scsi_slave_destroy()
1224 dev->flags |= ATA_DFLAG_DETACH; in ata_scsi_slave_destroy()
1227 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_slave_destroy()
1229 kfree(sdev->dma_drain_buf); in ata_scsi_slave_destroy()
1234 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
1246 * Zero on success, non-zero on error.
1250 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_start_stop_xlat()
1251 const u8 *cdb = scmd->cmnd; in ata_scsi_start_stop_xlat()
1255 if (scmd->cmd_len < 5) { in ata_scsi_start_stop_xlat()
1274 /* Ignore IMMED bit (cdb[1] & 0x1), violates sat-r05 */ in ata_scsi_start_stop_xlat()
1275 if (!ata_dev_power_init_tf(qc->dev, &qc->tf, cdb[4] & 0x1)) { in ata_scsi_start_stop_xlat()
1276 ata_scsi_set_sense(qc->dev, scmd, ABORTED_COMMAND, 0, 0); in ata_scsi_start_stop_xlat()
1281 * Standby and Idle condition timers could be implemented but that in ata_scsi_start_stop_xlat()
1290 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); in ata_scsi_start_stop_xlat()
1295 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
1305 * Zero on success, non-zero on error.
1309 struct ata_taskfile *tf = &qc->tf; in ata_scsi_flush_xlat()
1311 tf->flags |= ATA_TFLAG_DEVICE; in ata_scsi_flush_xlat()
1312 tf->protocol = ATA_PROT_NODATA; in ata_scsi_flush_xlat()
1314 if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT) in ata_scsi_flush_xlat()
1315 tf->command = ATA_CMD_FLUSH_EXT; in ata_scsi_flush_xlat()
1317 tf->command = ATA_CMD_FLUSH; in ata_scsi_flush_xlat()
1320 qc->flags |= ATA_QCFLAG_IO; in ata_scsi_flush_xlat()
1326 * scsi_6_lba_len - Get LBA and transfer length
1329 * Calculate LBA and transfer length for 6-byte commands.
1351 * scsi_10_lba_len - Get LBA and transfer length
1354 * Calculate LBA and transfer length for 10-byte commands.
1367 * scsi_16_lba_len - Get LBA and transfer length
1370 * Calculate LBA and transfer length for 16-byte commands.
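
/*
 * The three helpers above only differ in where the CDB keeps the LBA and the
 * transfer length. A self-contained sketch of the SBC-defined layouts, using
 * plain shifts instead of the kernel's get_unaligned_be* helpers:
 */
static void example_scsi_6_lba_len(const unsigned char *cdb,
				   unsigned long long *plba, unsigned int *plen)
{
	/* READ(6)/WRITE(6): 21-bit LBA in bytes 1-3, 8-bit length in byte 4 */
	*plba = ((unsigned long long)(cdb[1] & 0x1f) << 16) |
		((unsigned long long)cdb[2] << 8) | cdb[3];
	*plen = cdb[4];			/* 0 means 256 blocks */
}

static void example_scsi_10_lba_len(const unsigned char *cdb,
				    unsigned long long *plba, unsigned int *plen)
{
	/* READ(10)/WRITE(10): 32-bit LBA in bytes 2-5, 16-bit length in 7-8 */
	*plba = ((unsigned long long)cdb[2] << 24) | (cdb[3] << 16) |
		(cdb[4] << 8) | cdb[5];
	*plen = (cdb[7] << 8) | cdb[8];
}

static void example_scsi_16_lba_len(const unsigned char *cdb,
				    unsigned long long *plba, unsigned int *plen)
{
	/* READ(16)/WRITE(16): 64-bit LBA in bytes 2-9, 32-bit length in 10-13 */
	int i;

	*plba = 0;
	for (i = 2; i <= 9; i++)
		*plba = (*plba << 8) | cdb[i];
	*plen = ((unsigned int)cdb[10] << 24) | (cdb[11] << 16) |
		(cdb[12] << 8) | cdb[13];
}
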
1383 * scsi_dld - Get duration limit descriptor index
1395 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
1404 * Zero on success, non-zero on error.
1408 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_verify_xlat()
1409 struct ata_taskfile *tf = &qc->tf; in ata_scsi_verify_xlat()
1410 struct ata_device *dev = qc->dev; in ata_scsi_verify_xlat()
1411 u64 dev_sectors = qc->dev->n_sectors; in ata_scsi_verify_xlat()
1412 const u8 *cdb = scmd->cmnd; in ata_scsi_verify_xlat()
1417 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in ata_scsi_verify_xlat()
1418 tf->protocol = ATA_PROT_NODATA; in ata_scsi_verify_xlat()
1422 if (scmd->cmd_len < 10) { in ata_scsi_verify_xlat()
1429 if (scmd->cmd_len < 16) { in ata_scsi_verify_xlat()
1447 if (dev->flags & ATA_DFLAG_LBA) { in ata_scsi_verify_xlat()
1448 tf->flags |= ATA_TFLAG_LBA; in ata_scsi_verify_xlat()
1452 tf->command = ATA_CMD_VERIFY; in ata_scsi_verify_xlat()
1453 tf->device |= (block >> 24) & 0xf; in ata_scsi_verify_xlat()
1455 if (!(dev->flags & ATA_DFLAG_LBA48)) in ata_scsi_verify_xlat()
1459 tf->flags |= ATA_TFLAG_LBA48; in ata_scsi_verify_xlat()
1460 tf->command = ATA_CMD_VERIFY_EXT; in ata_scsi_verify_xlat()
1462 tf->hob_nsect = (n_block >> 8) & 0xff; in ata_scsi_verify_xlat()
1464 tf->hob_lbah = (block >> 40) & 0xff; in ata_scsi_verify_xlat()
1465 tf->hob_lbam = (block >> 32) & 0xff; in ata_scsi_verify_xlat()
1466 tf->hob_lbal = (block >> 24) & 0xff; in ata_scsi_verify_xlat()
1471 tf->nsect = n_block & 0xff; in ata_scsi_verify_xlat()
1473 tf->lbah = (block >> 16) & 0xff; in ata_scsi_verify_xlat()
1474 tf->lbam = (block >> 8) & 0xff; in ata_scsi_verify_xlat()
1475 tf->lbal = block & 0xff; in ata_scsi_verify_xlat()
1477 tf->device |= ATA_LBA; in ata_scsi_verify_xlat()
1486 track = (u32)block / dev->sectors; in ata_scsi_verify_xlat()
1487 cyl = track / dev->heads; in ata_scsi_verify_xlat()
1488 head = track % dev->heads; in ata_scsi_verify_xlat()
1489 sect = (u32)block % dev->sectors + 1; in ata_scsi_verify_xlat()
1492 Cylinder: 0-65535 in ata_scsi_verify_xlat()
1493 Head: 0-15 in ata_scsi_verify_xlat()
1494 Sector: 1-255*/ in ata_scsi_verify_xlat()
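
/*
 * Worked example: with dev->sectors == 63 and dev->heads == 16, block 100000
 * converts to track = 100000 / 63 = 1587, cyl = 1587 / 16 = 99,
 * head = 1587 % 16 = 3 and sect = 100000 % 63 + 1 = 20, all within the
 * limits listed above.
 */
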
1498 tf->command = ATA_CMD_VERIFY; in ata_scsi_verify_xlat()
1499 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ in ata_scsi_verify_xlat()
1500 tf->lbal = sect; in ata_scsi_verify_xlat()
1501 tf->lbam = cyl; in ata_scsi_verify_xlat()
1502 tf->lbah = cyl >> 8; in ata_scsi_verify_xlat()
1503 tf->device |= head; in ata_scsi_verify_xlat()
1509 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); in ata_scsi_verify_xlat()
1513 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0); in ata_scsi_verify_xlat()
1518 scmd->result = SAM_STAT_GOOD; in ata_scsi_verify_xlat()
1530 req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size; in ata_check_nblocks()
1538 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
1553 * Zero on success, non-zero on error.
1557 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_rw_xlat()
1558 const u8 *cdb = scmd->cmnd; in ata_scsi_rw_xlat()
1580 if (unlikely(scmd->cmd_len < 10)) { in ata_scsi_rw_xlat()
1592 if (unlikely(scmd->cmd_len < 6)) { in ata_scsi_rw_xlat()
1598 /* for 6-byte r/w commands, transfer length 0 in ata_scsi_rw_xlat()
1608 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_rw_xlat()
1626 /* For 10-byte and 16-byte SCSI R/W commands, transfer in ata_scsi_rw_xlat()
1635 qc->flags |= ATA_QCFLAG_IO; in ata_scsi_rw_xlat()
1636 qc->nbytes = n_block * scmd->device->sector_size; in ata_scsi_rw_xlat()
1642 if (rc == -ERANGE) in ata_scsi_rw_xlat()
1644 /* treat all other errors as -EINVAL, fall through */ in ata_scsi_rw_xlat()
1646 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); in ata_scsi_rw_xlat()
1650 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0); in ata_scsi_rw_xlat()
1655 scmd->result = SAM_STAT_GOOD; in ata_scsi_rw_xlat()
1661 struct scsi_cmnd *cmd = qc->scsicmd; in ata_qc_done()
1662 void (*done)(struct scsi_cmnd *) = qc->scsidone; in ata_qc_done()
1670 struct scsi_cmnd *cmd = qc->scsicmd; in ata_scsi_qc_complete()
1671 u8 *cdb = cmd->cmnd; in ata_scsi_qc_complete()
1672 bool have_sense = qc->flags & ATA_QCFLAG_SENSE_VALID; in ata_scsi_qc_complete()
1675 bool is_error = qc->err_mask != 0; in ata_scsi_qc_complete()
1684 * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE in ata_scsi_qc_complete()
1691 set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION); in ata_scsi_qc_complete()
1700 * ata_scsi_translate - Translate then issue SCSI command to ATA device
1705 * Our ->queuecommand() function has decided that the SCSI
1714 * then cmd->result (and possibly cmd->sense_buffer) are assumed
1728 struct ata_port *ap = dev->link->ap; in ata_scsi_translate()
1736 /* data is present; dma-map it */ in ata_scsi_translate()
1737 if (cmd->sc_data_direction == DMA_FROM_DEVICE || in ata_scsi_translate()
1738 cmd->sc_data_direction == DMA_TO_DEVICE) { in ata_scsi_translate()
1746 qc->dma_dir = cmd->sc_data_direction; in ata_scsi_translate()
1749 qc->complete_fn = ata_scsi_qc_complete; in ata_scsi_translate()
1754 if (ap->ops->qc_defer) { in ata_scsi_translate()
1755 if ((rc = ap->ops->qc_defer(qc))) in ata_scsi_translate()
1771 cmd->result = (DID_ERROR << 16); in ata_scsi_translate()
1791 * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
1799 * completed successfully (0), or not (in which case cmd->result
1809 struct scsi_cmnd *cmd = args->cmd; in ata_scsi_rbuf_fill()
1823 cmd->result = SAM_STAT_GOOD; in ata_scsi_rbuf_fill()
1827 * ata_scsiop_inq_std - Simulate INQUIRY command
1832 * with non-VPD INQUIRY command output.
1841 0x60, /* SAM-3 (no version claimed) */ in ata_scsiop_inq_std()
1844 0x20, /* SBC-2 (no version claimed) */ in ata_scsiop_inq_std()
1847 0x00 /* SPC-3 (no version claimed) */ in ata_scsiop_inq_std()
1851 0xA0, /* SAM-5 (no version claimed) */ in ata_scsiop_inq_std()
1854 0x00, /* SBC-4 (no version claimed) */ in ata_scsiop_inq_std()
1857 0xC0, /* SPC-5 (no version claimed) */ in ata_scsiop_inq_std()
1866 0x5, /* claim SPC-3 version compatibility */ in ata_scsiop_inq_std()
1868 95 - 4, in ata_scsiop_inq_std()
1876 * device bit (obsolete since ATA-8 ACS) is set. in ata_scsiop_inq_std()
1878 if (ata_id_removable(args->id)) in ata_scsiop_inq_std()
1881 if (args->dev->class == ATA_DEV_ZAC) { in ata_scsiop_inq_std()
1883 hdr[2] = 0x7; /* claim SPC-5 version compatibility */ in ata_scsiop_inq_std()
1886 if (args->dev->flags & ATA_DFLAG_CDL) in ata_scsiop_inq_std()
1887 hdr[2] = 0xd; /* claim SPC-6 version compatibility */ in ata_scsiop_inq_std()
1891 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16); in ata_scsiop_inq_std()
1894 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV + 2, 4); in ata_scsiop_inq_std()
1896 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4); in ata_scsiop_inq_std()
1901 if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC) in ata_scsiop_inq_std()
1910 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
1936 !(args->dev->flags & ATA_DFLAG_ZAC)) in ata_scsiop_inq_00()
1946 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
1965 ata_id_string(args->id, (unsigned char *) &rbuf[4], in ata_scsiop_inq_80()
1971 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
1976 * - vendor specific ASCII containing the ATA serial number
1977 * - SAT defined "t10 vendor id based" containing ASCII vendor
1995 ata_id_string(args->id, (unsigned char *) rbuf + num, in ata_scsiop_inq_83()
2007 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD, in ata_scsiop_inq_83()
2010 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO, in ata_scsiop_inq_83()
2014 if (ata_id_has_wwn(args->id)) { in ata_scsiop_inq_83()
2021 ata_id_string(args->id, (unsigned char *) rbuf + num, in ata_scsiop_inq_83()
2025 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */ in ata_scsiop_inq_83()
2030 * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
2034 * Yields SAT-specified ATA VPD page.
2060 memcpy(&rbuf[60], &args->id[0], 512); in ata_scsiop_inq_89()
2066 struct ata_device *dev = args->dev; in ata_scsiop_inq_b0()
2079 min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id); in ata_scsiop_inq_b0()
2088 * that we support some form of unmap - in this case via WRITE SAME in ata_scsiop_inq_b0()

2091 if (ata_id_has_trim(args->id)) { in ata_scsiop_inq_b0()
2094 if (dev->quirks & ATA_QUIRK_MAX_TRIM_128M) in ata_scsiop_inq_b0()
2095 max_blocks = 128 << (20 - SECTOR_SHIFT); in ata_scsiop_inq_b0()
2106 int form_factor = ata_id_form_factor(args->id); in ata_scsiop_inq_b1()
2107 int media_rotation_rate = ata_id_rotation_rate(args->id); in ata_scsiop_inq_b1()
2108 u8 zoned = ata_id_zoned_cap(args->id); in ata_scsiop_inq_b1()
2123 /* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */ in ata_scsiop_inq_b2()
2134 * zbc-r05 SCSI Zoned Block device characteristics VPD page in ata_scsiop_inq_b6()
2140 * URSWRZ bit is only meaningful for host-managed ZAC drives in ata_scsiop_inq_b6()
2142 if (args->dev->zac_zoned_cap & 1) in ata_scsiop_inq_b6()
2144 put_unaligned_be32(args->dev->zac_zones_optimal_open, &rbuf[8]); in ata_scsiop_inq_b6()
2145 put_unaligned_be32(args->dev->zac_zones_optimal_nonseq, &rbuf[12]); in ata_scsiop_inq_b6()
2146 put_unaligned_be32(args->dev->zac_zones_max_open, &rbuf[16]); in ata_scsiop_inq_b6()
2153 struct ata_cpr_log *cpr_log = args->dev->cpr_log; in ata_scsiop_inq_b9()
2157 /* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */ in ata_scsiop_inq_b9()
2159 put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]); in ata_scsiop_inq_b9()
2161 for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) { in ata_scsiop_inq_b9()
2162 desc[0] = cpr_log->cpr[i].num; in ata_scsiop_inq_b9()
2163 desc[1] = cpr_log->cpr[i].num_storage_elements; in ata_scsiop_inq_b9()
2164 put_unaligned_be64(cpr_log->cpr[i].start_lba, &desc[8]); in ata_scsiop_inq_b9()
2165 put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]); in ata_scsiop_inq_b9()
2172 * modecpy - Prepare response for MODE SENSE
2188 memset(dest + 2, 0, n - 2); in modecpy()
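
/*
 * A plausible reconstruction of modecpy(), inferred from the memset() shown
 * above and from the "page_control == 1" callers further down: for the MODE
 * SENSE "changeable values" page control, only the page code and page length
 * bytes are copied and every parameter byte is reported as not changeable.
 * Assumes memcpy()/memset() from <string.h> (or <linux/string.h> in-kernel).
 */
static void example_modecpy(unsigned char *dest, const unsigned char *src,
			    int n, int changeable)
{
	if (changeable) {
		memcpy(dest, src, 2);		/* page code + page length */
		memset(dest + 2, 0, n - 2);	/* nothing is changeable */
	} else {
		memcpy(dest, src, n);
	}
}
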
2195 * ata_msense_caching - Simulate MODE SENSE caching info page
2220 * Simulate MODE SENSE control mode page, sub-page 0.
2231 bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE); in ata_msense_control_spg0()
2242 * using the t2cdlunits 0xa (10ms). Since the SCSI duration limits are 2-bytes
2253 * Simulate MODE SENSE control mode page, sub-pages 07h and 08h
2263 if (!(dev->flags & ATA_DFLAG_CDL) || !dev->cdl) in ata_msense_control_spgt2()
2266 cdl = dev->cdl->desc_log_buf; in ata_msense_control_spgt2()
2275 put_unaligned_be16(CDL_T2_SUB_MPAGE_LEN - 4, &buf[2]); in ata_msense_control_spgt2()
2312 * Simulate MODE SENSE control mode page, sub-page f2h
2326 put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]); in ata_msense_control_ata_feature()
2328 if (dev->flags & ATA_DFLAG_CDL) in ata_msense_control_ata_feature()
2337 * ata_msense_control - Simulate MODE SENSE control mode page
2340 * @spg: sub-page code
2373 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
2390 * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
2403 struct ata_device *dev = args->dev; in ata_scsiop_mode_sense()
2404 u8 *scsicmd = args->cmd->cmnd, *p = rbuf; in ata_scsiop_mode_sense()
2444 * Supported subpages: all subpages and sub-pages 07h, 08h and f2h of in ata_scsiop_mode_sense()
2454 if (dev->flags & ATA_DFLAG_CDL && pg == CONTROL_MPAGE) in ata_scsiop_mode_sense()
2469 p += ata_msense_caching(args->id, p, page_control == 1); in ata_scsiop_mode_sense()
2473 p += ata_msense_control(args->dev, p, spg, page_control == 1); in ata_scsiop_mode_sense()
2478 p += ata_msense_caching(args->id, p, page_control == 1); in ata_scsiop_mode_sense()
2479 p += ata_msense_control(args->dev, p, spg, page_control == 1); in ata_scsiop_mode_sense()
2487 if (dev->flags & ATA_DFLAG_FUA) in ata_scsiop_mode_sense()
2491 rbuf[0] = p - rbuf - 1; in ata_scsiop_mode_sense()
2498 put_unaligned_be16(p - rbuf - 2, &rbuf[0]); in ata_scsiop_mode_sense()
2508 ata_scsi_set_invalid_field(dev, args->cmd, fp, bp); in ata_scsiop_mode_sense()
2512 ata_scsi_set_sense(dev, args->cmd, ILLEGAL_REQUEST, 0x39, 0x0); in ata_scsiop_mode_sense()
2518 * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
2529 struct ata_device *dev = args->dev; in ata_scsiop_read_cap()
2530 u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */ in ata_scsiop_read_cap()
2535 sector_size = ata_id_logical_sector_size(dev->id); in ata_scsiop_read_cap()
2536 log2_per_phys = ata_id_log2_per_physical_sector(dev->id); in ata_scsiop_read_cap()
2537 lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys); in ata_scsiop_read_cap()
2539 if (args->cmd->cmnd[0] == READ_CAPACITY) { in ata_scsiop_read_cap()
2543 /* sector count, 32-bit */ in ata_scsiop_read_cap()
2555 /* sector count, 64-bit */ in ata_scsiop_read_cap()
2576 if (ata_id_has_trim(args->id) && in ata_scsiop_read_cap()
2577 !(dev->quirks & ATA_QUIRK_NOTRIM)) { in ata_scsiop_read_cap()
2580 if (ata_id_has_zero_after_trim(args->id) && in ata_scsiop_read_cap()
2581 dev->quirks & ATA_QUIRK_ZERO_AFTER_TRIM) { in ata_scsiop_read_cap()
2586 if (ata_id_zoned_cap(args->id) || in ata_scsiop_read_cap()
2587 args->dev->class == ATA_DEV_ZAC) in ata_scsiop_read_cap()
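
/*
 * The thin-provisioning checks a few lines above gate flags that, in the
 * SBC-defined READ CAPACITY (16) parameter data, live in byte 14: bit 7
 * (LBPME) advertises logical block provisioning management (TRIM-backed
 * unmap) and bit 6 (LBPRZ) promises zeroed data after unmap, which is why
 * the second check also requires ATA_QUIRK_ZERO_AFTER_TRIM.
 */
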
2594 * ata_scsiop_report_luns - Simulate REPORT LUNS command
2614 * 1) Fake MMC-5 version, to indicate to the Linux scsi midlayer this is a
2632 struct scsi_cmnd *cmd = qc->scsicmd; in atapi_qc_complete()
2633 unsigned int err_mask = qc->err_mask; in atapi_qc_complete()
2636 if (unlikely(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID)) { in atapi_qc_complete()
2638 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) in atapi_qc_complete()
2641 /* SCSI EH automatically locks door if sdev->locked is in atapi_qc_complete()
2644 * creates a loop - SCSI EH issues door lock which in atapi_qc_complete()
2648 * If door lock fails, always clear sdev->locked to in atapi_qc_complete()
2652 * sure qc->dev->sdev isn't NULL before dereferencing. in atapi_qc_complete()
2654 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev) in atapi_qc_complete()
2655 qc->dev->sdev->locked = 0; in atapi_qc_complete()
2657 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; in atapi_qc_complete()
2663 if (cmd->cmnd[0] == INQUIRY && (cmd->cmnd[1] & 0x03) == 0) in atapi_qc_complete()
2665 cmd->result = SAM_STAT_GOOD; in atapi_qc_complete()
2670 * atapi_xlat - Initialize PACKET taskfile
2677 * Zero on success, non-zero on failure.
2681 struct scsi_cmnd *scmd = qc->scsicmd; in atapi_xlat()
2682 struct ata_device *dev = qc->dev; in atapi_xlat()
2683 int nodata = (scmd->sc_data_direction == DMA_NONE); in atapi_xlat()
2684 int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO); in atapi_xlat()
2687 memset(qc->cdb, 0, dev->cdb_len); in atapi_xlat()
2688 memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len); in atapi_xlat()
2690 qc->complete_fn = atapi_qc_complete; in atapi_xlat()
2692 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in atapi_xlat()
2693 if (scmd->sc_data_direction == DMA_TO_DEVICE) { in atapi_xlat()
2694 qc->tf.flags |= ATA_TFLAG_WRITE; in atapi_xlat()
2697 qc->tf.command = ATA_CMD_PACKET; in atapi_xlat()
2705 * transfers to do state machine and FIFO management. Thus we in atapi_xlat()
2738 qc->tf.lbam = (nbytes & 0xFF); in atapi_xlat()
2739 qc->tf.lbah = (nbytes >> 8); in atapi_xlat()
2742 qc->tf.protocol = ATAPI_PROT_NODATA; in atapi_xlat()
2744 qc->tf.protocol = ATAPI_PROT_PIO; in atapi_xlat()
2747 qc->tf.protocol = ATAPI_PROT_DMA; in atapi_xlat()
2748 qc->tf.feature |= ATAPI_PKT_DMA; in atapi_xlat()
2750 if ((dev->flags & ATA_DFLAG_DMADIR) && in atapi_xlat()
2751 (scmd->sc_data_direction != DMA_TO_DEVICE)) in atapi_xlat()
2753 qc->tf.feature |= ATAPI_DMADIR; in atapi_xlat()
2765 * For the non-PMP case, ata_link_max_devices() returns 1 (SATA case), in ata_find_dev()
2773 int link_max_devices = ata_link_max_devices(&ap->link); in ata_find_dev()
2776 return &ap->link.device[0]; in ata_find_dev()
2779 return &ap->link.device[devno]; in ata_find_dev()
2785 * For PMP-attached devices, the device number corresponds to C in ata_find_dev()
2789 if (devno < ap->nr_pmp_links) in ata_find_dev()
2790 return &ap->pmp_link[devno].device[0]; in ata_find_dev()
2802 if (unlikely(scsidev->channel || scsidev->lun)) in __ata_scsi_find_dev()
2804 devno = scsidev->id; in __ata_scsi_find_dev()
2806 if (unlikely(scsidev->id || scsidev->lun)) in __ata_scsi_find_dev()
2808 devno = scsidev->channel; in __ata_scsi_find_dev()
2815 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd
2842 * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value.
2843 * @byte1: Byte 1 from pass-thru CDB.
2852 case 3: /* Non-data */ in ata_scsi_map_proto()
2856 case 10: /* UDMA Data-in */ in ata_scsi_map_proto()
2857 case 11: /* UDMA Data-Out */ in ata_scsi_map_proto()
2860 case 4: /* PIO Data-in */ in ata_scsi_map_proto()
2861 case 5: /* PIO Data-out */ in ata_scsi_map_proto()
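
/*
 * A condensed sketch of the mapping implied by the case labels above. The
 * values are taken from the PROTOCOL field (bits 4:1 of CDB byte 1) of the
 * ATA PASS-THROUGH CDB; anything unhandled becomes the "unknown" protocol,
 * which the caller rejects as an invalid field. The enum below stands in for
 * the kernel's ATA_PROT_* constants.
 */
enum example_prot {
	EX_PROT_UNKNOWN,
	EX_PROT_NODATA,
	EX_PROT_PIO,
	EX_PROT_DMA,
};

static enum example_prot example_map_proto(unsigned char byte1)
{
	switch ((byte1 & 0x1e) >> 1) {
	case 3:				/* Non-data */
		return EX_PROT_NODATA;
	case 4:				/* PIO Data-in */
	case 5:				/* PIO Data-out */
		return EX_PROT_PIO;
	case 10:			/* UDMA Data-in */
	case 11:			/* UDMA Data-out */
		return EX_PROT_DMA;
	default:
		return EX_PROT_UNKNOWN;
	}
}
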
2881 * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
2884 * Handles either 12, 16, or 32-byte versions of the CDB.
2887 * Zero on success, non-zero on failure.
2891 struct ata_taskfile *tf = &(qc->tf); in ata_scsi_pass_thru()
2892 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_pass_thru()
2893 struct ata_device *dev = qc->dev; in ata_scsi_pass_thru()
2894 const u8 *cdb = scmd->cmnd; in ata_scsi_pass_thru()
2898 /* 7Fh variable length cmd means an ATA pass-thru(32) */ in ata_scsi_pass_thru()
2902 tf->protocol = ata_scsi_map_proto(cdb[1 + cdb_offset]); in ata_scsi_pass_thru()
2903 if (tf->protocol == ATA_PROT_UNKNOWN) { in ata_scsi_pass_thru()
2913 if (scmd->sc_data_direction != DMA_NONE) { in ata_scsi_pass_thru()
2918 if (ata_is_ncq(tf->protocol)) in ata_scsi_pass_thru()
2919 tf->protocol = ATA_PROT_NCQ_NODATA; in ata_scsi_pass_thru()
2923 tf->flags |= ATA_TFLAG_LBA; in ata_scsi_pass_thru()
2932 * 16-byte CDB - may contain extended commands. in ata_scsi_pass_thru()
2937 tf->hob_feature = cdb[3]; in ata_scsi_pass_thru()
2938 tf->hob_nsect = cdb[5]; in ata_scsi_pass_thru()
2939 tf->hob_lbal = cdb[7]; in ata_scsi_pass_thru()
2940 tf->hob_lbam = cdb[9]; in ata_scsi_pass_thru()
2941 tf->hob_lbah = cdb[11]; in ata_scsi_pass_thru()
2942 tf->flags |= ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
2944 tf->flags &= ~ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
2949 tf->feature = cdb[4]; in ata_scsi_pass_thru()
2950 tf->nsect = cdb[6]; in ata_scsi_pass_thru()
2951 tf->lbal = cdb[8]; in ata_scsi_pass_thru()
2952 tf->lbam = cdb[10]; in ata_scsi_pass_thru()
2953 tf->lbah = cdb[12]; in ata_scsi_pass_thru()
2954 tf->device = cdb[13]; in ata_scsi_pass_thru()
2955 tf->command = cdb[14]; in ata_scsi_pass_thru()
2959 * 12-byte CDB - incapable of extended commands. in ata_scsi_pass_thru()
2961 tf->flags &= ~ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
2963 tf->feature = cdb[3]; in ata_scsi_pass_thru()
2964 tf->nsect = cdb[4]; in ata_scsi_pass_thru()
2965 tf->lbal = cdb[5]; in ata_scsi_pass_thru()
2966 tf->lbam = cdb[6]; in ata_scsi_pass_thru()
2967 tf->lbah = cdb[7]; in ata_scsi_pass_thru()
2968 tf->device = cdb[8]; in ata_scsi_pass_thru()
2969 tf->command = cdb[9]; in ata_scsi_pass_thru()
2973 * 32-byte CDB - may contain extended command fields. in ata_scsi_pass_thru()
2978 tf->hob_feature = cdb[20]; in ata_scsi_pass_thru()
2979 tf->hob_nsect = cdb[22]; in ata_scsi_pass_thru()
2980 tf->hob_lbal = cdb[16]; in ata_scsi_pass_thru()
2981 tf->hob_lbam = cdb[15]; in ata_scsi_pass_thru()
2982 tf->hob_lbah = cdb[14]; in ata_scsi_pass_thru()
2983 tf->flags |= ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
2985 tf->flags &= ~ATA_TFLAG_LBA48; in ata_scsi_pass_thru()
2987 tf->feature = cdb[21]; in ata_scsi_pass_thru()
2988 tf->nsect = cdb[23]; in ata_scsi_pass_thru()
2989 tf->lbal = cdb[19]; in ata_scsi_pass_thru()
2990 tf->lbam = cdb[18]; in ata_scsi_pass_thru()
2991 tf->lbah = cdb[17]; in ata_scsi_pass_thru()
2992 tf->device = cdb[24]; in ata_scsi_pass_thru()
2993 tf->command = cdb[25]; in ata_scsi_pass_thru()
2994 tf->auxiliary = get_unaligned_be32(&cdb[28]); in ata_scsi_pass_thru()
2999 if (ata_is_ncq(tf->protocol)) in ata_scsi_pass_thru()
3000 tf->nsect = qc->hw_tag << 3; in ata_scsi_pass_thru()
3003 tf->device = dev->devno ? in ata_scsi_pass_thru()
3004 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; in ata_scsi_pass_thru()
3006 switch (tf->command) { in ata_scsi_pass_thru()
3007 /* READ/WRITE LONG use a non-standard sect_size */ in ata_scsi_pass_thru()
3012 if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) { in ata_scsi_pass_thru()
3016 qc->sect_size = scsi_bufflen(scmd); in ata_scsi_pass_thru()
3050 qc->sect_size = scmd->device->sector_size; in ata_scsi_pass_thru()
3055 qc->sect_size = ATA_SECT_SIZE; in ata_scsi_pass_thru()
3063 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; in ata_scsi_pass_thru()
3064 if (scmd->sc_data_direction == DMA_TO_DEVICE) in ata_scsi_pass_thru()
3065 tf->flags |= ATA_TFLAG_WRITE; in ata_scsi_pass_thru()
3067 qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; in ata_scsi_pass_thru()
3078 if (tf->protocol == ATA_PROT_DMA && !ata_dma_enabled(dev)) { in ata_scsi_pass_thru()
3084 if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) { in ata_scsi_pass_thru()
3101 if (multi_count != dev->multi_count) in ata_scsi_pass_thru()
3107 * Filter SET_FEATURES - XFER MODE command -- otherwise, in ata_scsi_pass_thru()
3108 * SET_FEATURES - XFER MODE must be preceded/succeeded in ata_scsi_pass_thru()
3109 * by an update to hardware-specific registers for each in ata_scsi_pass_thru()
3110 * controller (i.e. the reason for ->set_piomode(), in ata_scsi_pass_thru()
3111 * ->set_dmamode(), and ->post_set_mode() hooks). in ata_scsi_pass_thru()
3113 if (tf->command == ATA_CMD_SET_FEATURES && in ata_scsi_pass_thru()
3114 tf->feature == SETFEATURES_XFER) { in ata_scsi_pass_thru()
3134 if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) { in ata_scsi_pass_thru()
3147 * ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim
3153 * Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted
3170 struct scsi_device *sdp = cmd->device; in ata_format_dsm_trim_descr()
3171 size_t len = sdp->sector_size; in ata_format_dsm_trim_descr()
3191 count -= 0xffff; in ata_format_dsm_trim_descr()
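
/*
 * Each TRIM range in the DSM payload is an 8-byte little-endian entry with
 * the LBA in bits 47:0 and the range length in bits 63:48, which is why the
 * code above peels the request apart in chunks of 0xffff sectors. A minimal
 * sketch of packing one entry; the returned value still needs a
 * cpu_to_le64()-style conversion before being stored in the data buffer.
 */
static unsigned long long example_trim_entry(unsigned long long lba,
					     unsigned int count)
{
	/* Callers must have capped count at 0xffff sectors per entry. */
	return ((unsigned long long)(count & 0xffff) << 48) |
	       (lba & 0xffffffffffffULL);
}
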
3201 * ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same
3208 * - When set translate to DSM TRIM
3209 * - When clear translate to SCT Write Same
3213 struct ata_taskfile *tf = &qc->tf; in ata_scsi_write_same_xlat()
3214 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_write_same_xlat()
3215 struct scsi_device *sdp = scmd->device; in ata_scsi_write_same_xlat()
3216 size_t len = sdp->sector_size; in ata_scsi_write_same_xlat()
3217 struct ata_device *dev = qc->dev; in ata_scsi_write_same_xlat()
3218 const u8 *cdb = scmd->cmnd; in ata_scsi_write_same_xlat()
3239 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_write_same_xlat()
3245 if (!unmap || (dev->quirks & ATA_QUIRK_NOTRIM) || in ata_scsi_write_same_xlat()
3246 !ata_id_has_trim(dev->id)) { in ata_scsi_write_same_xlat()
3266 * For DATA SET MANAGEMENT TRIM in ACS-2 nsect (aka count) in ata_scsi_write_same_xlat()
3276 tf->protocol = ATA_PROT_NCQ; in ata_scsi_write_same_xlat()
3277 tf->command = ATA_CMD_FPDMA_SEND; in ata_scsi_write_same_xlat()
3278 tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f; in ata_scsi_write_same_xlat()
3279 tf->nsect = qc->hw_tag << 3; in ata_scsi_write_same_xlat()
3280 tf->hob_feature = (size / 512) >> 8; in ata_scsi_write_same_xlat()
3281 tf->feature = size / 512; in ata_scsi_write_same_xlat()
3283 tf->auxiliary = 1; in ata_scsi_write_same_xlat()
3285 tf->protocol = ATA_PROT_DMA; in ata_scsi_write_same_xlat()
3286 tf->hob_feature = 0; in ata_scsi_write_same_xlat()
3287 tf->feature = ATA_DSM_TRIM; in ata_scsi_write_same_xlat()
3288 tf->hob_nsect = (size / 512) >> 8; in ata_scsi_write_same_xlat()
3289 tf->nsect = size / 512; in ata_scsi_write_same_xlat()
3290 tf->command = ATA_CMD_DSM; in ata_scsi_write_same_xlat()
3293 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | in ata_scsi_write_same_xlat()
3314 * ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
3325 struct ata_device *dev = args->dev; in ata_scsiop_maint_in()
3326 u8 *cdb = args->cmd->cmnd; in ata_scsiop_maint_in()
3367 if (dev->flags & ATA_DFLAG_CDL) { in ata_scsiop_maint_in()
3378 if (dev->flags & ATA_DFLAG_CDL) { in ata_scsiop_maint_in()
3389 if (ata_id_zoned_cap(dev->id) || in ata_scsiop_maint_in()
3390 dev->class == ATA_DEV_ZAC) in ata_scsiop_maint_in()
3395 if (dev->flags & ATA_DFLAG_TRUSTED) in ata_scsiop_maint_in()
3409 * ata_scsi_report_zones_complete - convert ATA output
3412 * Convert T-13 little-endian field representation into
3413 * T-10 big-endian field representation.
3418 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_report_zones_complete()
3481 struct ata_taskfile *tf = &qc->tf; in ata_scsi_zbc_in_xlat()
3482 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_zbc_in_xlat()
3483 const u8 *cdb = scmd->cmnd; in ata_scsi_zbc_in_xlat()
3484 u16 sect, fp = (u16)-1; in ata_scsi_zbc_in_xlat()
3489 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_zbc_in_xlat()
3490 ata_dev_warn(qc->dev, "invalid cdb length %d\n", in ata_scsi_zbc_in_xlat()
3491 scmd->cmd_len); in ata_scsi_zbc_in_xlat()
3497 ata_dev_warn(qc->dev, "non-matching transfer count (%d/%d)\n", in ata_scsi_zbc_in_xlat()
3503 ata_dev_warn(qc->dev, "invalid service action %d\n", sa); in ata_scsi_zbc_in_xlat()
3512 ata_dev_warn(qc->dev, "invalid transfer count %d\n", n_block); in ata_scsi_zbc_in_xlat()
3518 if (ata_ncq_enabled(qc->dev) && in ata_scsi_zbc_in_xlat()
3519 ata_fpdma_zac_mgmt_in_supported(qc->dev)) { in ata_scsi_zbc_in_xlat()
3520 tf->protocol = ATA_PROT_NCQ; in ata_scsi_zbc_in_xlat()
3521 tf->command = ATA_CMD_FPDMA_RECV; in ata_scsi_zbc_in_xlat()
3522 tf->hob_nsect = ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN & 0x1f; in ata_scsi_zbc_in_xlat()
3523 tf->nsect = qc->hw_tag << 3; in ata_scsi_zbc_in_xlat()
3524 tf->feature = sect & 0xff; in ata_scsi_zbc_in_xlat()
3525 tf->hob_feature = (sect >> 8) & 0xff; in ata_scsi_zbc_in_xlat()
3526 tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES | (options << 8); in ata_scsi_zbc_in_xlat()
3528 tf->command = ATA_CMD_ZAC_MGMT_IN; in ata_scsi_zbc_in_xlat()
3529 tf->feature = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES; in ata_scsi_zbc_in_xlat()
3530 tf->protocol = ATA_PROT_DMA; in ata_scsi_zbc_in_xlat()
3531 tf->hob_feature = options; in ata_scsi_zbc_in_xlat()
3532 tf->hob_nsect = (sect >> 8) & 0xff; in ata_scsi_zbc_in_xlat()
3533 tf->nsect = sect & 0xff; in ata_scsi_zbc_in_xlat()
3535 tf->device = ATA_LBA; in ata_scsi_zbc_in_xlat()
3536 tf->lbah = (block >> 16) & 0xff; in ata_scsi_zbc_in_xlat()
3537 tf->lbam = (block >> 8) & 0xff; in ata_scsi_zbc_in_xlat()
3538 tf->lbal = block & 0xff; in ata_scsi_zbc_in_xlat()
3539 tf->hob_lbah = (block >> 40) & 0xff; in ata_scsi_zbc_in_xlat()
3540 tf->hob_lbam = (block >> 32) & 0xff; in ata_scsi_zbc_in_xlat()
3541 tf->hob_lbal = (block >> 24) & 0xff; in ata_scsi_zbc_in_xlat()
3543 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48; in ata_scsi_zbc_in_xlat()
3544 qc->flags |= ATA_QCFLAG_RESULT_TF; in ata_scsi_zbc_in_xlat()
3548 qc->complete_fn = ata_scsi_report_zones_complete; in ata_scsi_zbc_in_xlat()
3553 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); in ata_scsi_zbc_in_xlat()
3558 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); in ata_scsi_zbc_in_xlat()
3564 struct ata_taskfile *tf = &qc->tf; in ata_scsi_zbc_out_xlat()
3565 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_zbc_out_xlat()
3566 struct ata_device *dev = qc->dev; in ata_scsi_zbc_out_xlat()
3567 const u8 *cdb = scmd->cmnd; in ata_scsi_zbc_out_xlat()
3571 u16 fp = (u16)-1; in ata_scsi_zbc_out_xlat()
3573 if (unlikely(scmd->cmd_len < 16)) { in ata_scsi_zbc_out_xlat()
3599 } else if (block >= dev->n_sectors) { in ata_scsi_zbc_out_xlat()
3607 if (ata_ncq_enabled(qc->dev) && in ata_scsi_zbc_out_xlat()
3608 ata_fpdma_zac_mgmt_out_supported(qc->dev)) { in ata_scsi_zbc_out_xlat()
3609 tf->protocol = ATA_PROT_NCQ_NODATA; in ata_scsi_zbc_out_xlat()
3610 tf->command = ATA_CMD_NCQ_NON_DATA; in ata_scsi_zbc_out_xlat()
3611 tf->feature = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT; in ata_scsi_zbc_out_xlat()
3612 tf->nsect = qc->hw_tag << 3; in ata_scsi_zbc_out_xlat()
3613 tf->auxiliary = sa | ((u16)all << 8); in ata_scsi_zbc_out_xlat()
3615 tf->protocol = ATA_PROT_NODATA; in ata_scsi_zbc_out_xlat()
3616 tf->command = ATA_CMD_ZAC_MGMT_OUT; in ata_scsi_zbc_out_xlat()
3617 tf->feature = sa; in ata_scsi_zbc_out_xlat()
3618 tf->hob_feature = all; in ata_scsi_zbc_out_xlat()
3620 tf->lbah = (block >> 16) & 0xff; in ata_scsi_zbc_out_xlat()
3621 tf->lbam = (block >> 8) & 0xff; in ata_scsi_zbc_out_xlat()
3622 tf->lbal = block & 0xff; in ata_scsi_zbc_out_xlat()
3623 tf->hob_lbah = (block >> 40) & 0xff; in ata_scsi_zbc_out_xlat()
3624 tf->hob_lbam = (block >> 32) & 0xff; in ata_scsi_zbc_out_xlat()
3625 tf->hob_lbal = (block >> 24) & 0xff; in ata_scsi_zbc_out_xlat()
3626 tf->device = ATA_LBA; in ata_scsi_zbc_out_xlat()
3627 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48; in ata_scsi_zbc_out_xlat()
3632 ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); in ata_scsi_zbc_out_xlat()
3636 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); in ata_scsi_zbc_out_xlat()
3641 * ata_mselect_caching - Simulate MODE SELECT for caching info page
3655 struct ata_taskfile *tf = &qc->tf; in ata_mselect_caching()
3656 struct ata_device *dev = qc->dev; in ata_mselect_caching()
3666 if (len != CACHE_MPAGE_LEN - 2) { in ata_mselect_caching()
3667 *fp = min(len, CACHE_MPAGE_LEN - 2); in ata_mselect_caching()
3668 return -EINVAL; in ata_mselect_caching()
3674 * Check that read-only bits are not modified. in ata_mselect_caching()
3676 ata_msense_caching(dev->id, mpage, false); in ata_mselect_caching()
3677 for (i = 0; i < CACHE_MPAGE_LEN - 2; i++) { in ata_mselect_caching()
3682 return -EINVAL; in ata_mselect_caching()
3686 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; in ata_mselect_caching()
3687 tf->protocol = ATA_PROT_NODATA; in ata_mselect_caching()
3688 tf->nsect = 0; in ata_mselect_caching()
3689 tf->command = ATA_CMD_SET_FEATURES; in ata_mselect_caching()
3690 tf->feature = wce ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF; in ata_mselect_caching()
3695 * Simulate MODE SELECT control mode page, sub-page 0.
3700 struct ata_device *dev = qc->dev; in ata_mselect_control_spg0()
3710 if (len != CONTROL_MPAGE_LEN - 2) { in ata_mselect_control_spg0()
3711 *fp = min(len, CONTROL_MPAGE_LEN - 2); in ata_mselect_control_spg0()
3712 return -EINVAL; in ata_mselect_control_spg0()
3718 * Check that read-only bits are not modified. in ata_mselect_control_spg0()
3721 for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) { in ata_mselect_control_spg0()
3726 return -EINVAL; in ata_mselect_control_spg0()
3730 dev->flags |= ATA_DFLAG_D_SENSE; in ata_mselect_control_spg0()
3732 dev->flags &= ~ATA_DFLAG_D_SENSE; in ata_mselect_control_spg0()
3737 * Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode
3744 struct ata_device *dev = qc->dev; in ata_mselect_control_ata_feature()
3745 struct ata_taskfile *tf = &qc->tf; in ata_mselect_control_ata_feature()
3752 if (len != ATA_FEATURE_SUB_MPAGE_LEN - 4) { in ata_mselect_control_ata_feature()
3753 *fp = min(len, ATA_FEATURE_SUB_MPAGE_LEN - 4); in ata_mselect_control_ata_feature()
3754 return -EINVAL; in ata_mselect_control_ata_feature()
3762 dev->flags &= ~ATA_DFLAG_CDL_ENABLED; in ata_mselect_control_ata_feature()
3766 if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) { in ata_mselect_control_ata_feature()
3769 return -EINVAL; in ata_mselect_control_ata_feature()
3772 dev->flags |= ATA_DFLAG_CDL_ENABLED; in ata_mselect_control_ata_feature()
3776 return -EINVAL; in ata_mselect_control_ata_feature()
3779 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; in ata_mselect_control_ata_feature()
3780 tf->protocol = ATA_PROT_NODATA; in ata_mselect_control_ata_feature()
3781 tf->command = ATA_CMD_SET_FEATURES; in ata_mselect_control_ata_feature()
3782 tf->feature = SETFEATURES_CDL; in ata_mselect_control_ata_feature()
3783 tf->nsect = cdl_action; in ata_mselect_control_ata_feature()
3789 * ata_mselect_control - Simulate MODE SELECT for control page
3791 * @spg: target sub-page of the control page
3810 return -EINVAL; in ata_mselect_control()
3815 * ata_scsi_mode_select_xlat - Simulate MODE SELECT 6, 10 commands
3827 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_mode_select_xlat()
3828 const u8 *cdb = scmd->cmnd; in ata_scsi_mode_select_xlat()
3832 u16 fp = (u16)-1; in ata_scsi_mode_select_xlat()
3839 if (scmd->cmd_len < 5) { in ata_scsi_mode_select_xlat()
3847 if (scmd->cmd_len < 9) { in ata_scsi_mode_select_xlat()
3864 if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len) in ata_scsi_mode_select_xlat()
3880 len -= hdr_len; in ata_scsi_mode_select_xlat()
3890 len -= bd_len; in ata_scsi_mode_select_xlat()
3904 len -= 4; in ata_scsi_mode_select_xlat()
3912 len -= 2; in ata_scsi_mode_select_xlat()
3916 * Supported subpages: all subpages and ATA feature sub-page f2h of in ata_scsi_mode_select_xlat()
3930 if (qc->dev->flags & ATA_DFLAG_CDL && in ata_scsi_mode_select_xlat()
3975 ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp); in ata_scsi_mode_select_xlat()
3979 ata_scsi_set_invalid_parameter(qc->dev, scmd, fp); in ata_scsi_mode_select_xlat()
3984 ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); in ata_scsi_mode_select_xlat()
3988 scmd->result = SAM_STAT_GOOD; in ata_scsi_mode_select_xlat()
4004 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_security_inout_xlat()
4005 const u8 *cdb = scmd->cmnd; in ata_scsi_security_inout_xlat()
4006 struct ata_taskfile *tf = &qc->tf; in ata_scsi_security_inout_xlat()
4011 bool dma = !(qc->dev->flags & ATA_DFLAG_PIO); in ata_scsi_security_inout_xlat()
4017 ata_scsi_set_invalid_field(qc->dev, scmd, 1, 0); in ata_scsi_security_inout_xlat()
4023 ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0); in ata_scsi_security_inout_xlat()
4028 ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0); in ata_scsi_security_inout_xlat()
4032 /* convert to the sector-based ATA addressing */ in ata_scsi_security_inout_xlat()
4036 tf->protocol = dma ? ATA_PROT_DMA : ATA_PROT_PIO; in ata_scsi_security_inout_xlat()
4037 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR | ATA_TFLAG_LBA; in ata_scsi_security_inout_xlat()
4039 tf->flags |= ATA_TFLAG_WRITE; in ata_scsi_security_inout_xlat()
4040 tf->command = ata_scsi_trusted_op(len, send, dma); in ata_scsi_security_inout_xlat()
4041 tf->feature = secp; in ata_scsi_security_inout_xlat()
4042 tf->lbam = spsp & 0xff; in ata_scsi_security_inout_xlat()
4043 tf->lbah = spsp >> 8; in ata_scsi_security_inout_xlat()
4046 tf->nsect = len & 0xff; in ata_scsi_security_inout_xlat()
4047 tf->lbal = len >> 8; in ata_scsi_security_inout_xlat()
4050 tf->lbah = (1 << 7); in ata_scsi_security_inout_xlat()
4058 * ata_scsi_var_len_cdb_xlat - SATL variable length CDB to Handler
4065 * Zero on success, non-zero on failure
4070 struct scsi_cmnd *scmd = qc->scsicmd; in ata_scsi_var_len_cdb_xlat()
4071 const u8 *cdb = scmd->cmnd; in ata_scsi_var_len_cdb_xlat()
4075 * if service action represents an ATA pass-thru(32) command, in ata_scsi_var_len_cdb_xlat()
4086 * ata_get_xlat_func - check if SCSI to ATA translation is possible
4141 if (!(dev->flags & ATA_DFLAG_TRUSTED)) in ata_get_xlat_func()
4154 struct ata_port *ap = dev->link->ap; in __ata_scsi_queuecmd()
4155 u8 scsi_op = scmd->cmnd[0]; in __ata_scsi_queuecmd()
4160 * However, this check is done without holding the ap->lock (a libata in __ata_scsi_queuecmd()
4162 * therefore we must check if EH is pending, while holding ap->lock. in __ata_scsi_queuecmd()
4164 if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) in __ata_scsi_queuecmd()
4167 if (unlikely(!scmd->cmd_len)) in __ata_scsi_queuecmd()
4170 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { in __ata_scsi_queuecmd()
4171 if (unlikely(scmd->cmd_len > dev->cdb_len)) in __ata_scsi_queuecmd()
4179 if (unlikely(len > scmd->cmd_len || in __ata_scsi_queuecmd()
4180 len > dev->cdb_len || in __ata_scsi_queuecmd()
4181 scmd->cmd_len > ATAPI_CDB_LEN)) in __ata_scsi_queuecmd()
4187 if (unlikely(scmd->cmd_len > 16)) in __ata_scsi_queuecmd()
4201 scmd->result = DID_ERROR << 16; in __ata_scsi_queuecmd()
4207 * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
4229 struct scsi_device *scsidev = cmd->device; in ata_scsi_queuecmd()
4235 spin_lock_irqsave(ap->lock, irq_flags); in ata_scsi_queuecmd()
4241 cmd->result = (DID_BAD_TARGET << 16); in ata_scsi_queuecmd()
4245 spin_unlock_irqrestore(ap->lock, irq_flags); in ata_scsi_queuecmd()
4252 * ata_scsi_simulate - simulate SCSI command on ATA device
4266 const u8 *scsicmd = cmd->cmnd; in ata_scsi_simulate()
4270 args.id = dev->id; in ata_scsi_simulate()
4302 if (dev->flags & ATA_DFLAG_ZAC) in ata_scsi_simulate()
4308 if (dev->cpr_log) in ata_scsi_simulate()
4344 * turning this into a no-op. in ata_scsi_simulate()
4350 /* no-op's, complete with success */ in ata_scsi_simulate()
4384 for (i = 0; i < host->n_ports; i++) { in ata_scsi_add_hosts()
4385 struct ata_port *ap = host->ports[i]; in ata_scsi_add_hosts()
4388 rc = -ENOMEM; in ata_scsi_add_hosts()
4393 shost->eh_noresume = 1; in ata_scsi_add_hosts()
4394 *(struct ata_port **)&shost->hostdata[0] = ap; in ata_scsi_add_hosts()
4395 ap->scsi_host = shost; in ata_scsi_add_hosts()
4397 shost->transportt = ata_scsi_transport_template; in ata_scsi_add_hosts()
4398 shost->unique_id = ap->print_id; in ata_scsi_add_hosts()
4399 shost->max_id = 16; in ata_scsi_add_hosts()
4400 shost->max_lun = 1; in ata_scsi_add_hosts()
4401 shost->max_channel = 1; in ata_scsi_add_hosts()
4402 shost->max_cmd_len = 32; in ata_scsi_add_hosts()
4404 /* Schedule policy is determined by ->qc_defer() in ata_scsi_add_hosts()
4409 shost->max_host_blocked = 1; in ata_scsi_add_hosts()
4411 rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev); in ata_scsi_add_hosts()
4419 while (--i >= 0) { in ata_scsi_add_hosts()
4420 struct Scsi_Host *shost = host->ports[i]->scsi_host; in ata_scsi_add_hosts()
4431 struct scsi_device *sdev = dev->sdev; in ata_scsi_assign_ofnode()
4432 struct device *d = ap->host->dev; in ata_scsi_assign_ofnode()
4433 struct device_node *np = d->of_node; in ata_scsi_assign_ofnode()
4443 if (val == dev->devno) { in ata_scsi_assign_ofnode()
4445 sdev->sdev_gendev.of_node = child; in ata_scsi_assign_ofnode()
4469 if (dev->sdev) in ata_scsi_scan_host()
4473 id = dev->devno; in ata_scsi_scan_host()
4475 channel = link->pmp; in ata_scsi_scan_host()
4477 sdev = __scsi_add_device(ap->scsi_host, channel, id, 0, in ata_scsi_scan_host()
4480 dev->sdev = sdev; in ata_scsi_scan_host()
4484 dev->sdev = NULL; in ata_scsi_scan_host()
4495 if (!dev->sdev) in ata_scsi_scan_host()
4517 if (--tries) { in ata_scsi_scan_host()
4526 queue_delayed_work(system_long_wq, &ap->hotplug_task, in ata_scsi_scan_host()
4531 * ata_scsi_offline_dev - offline attached SCSI device
4536 * function is called with host lock which protects dev->sdev
4547 if (dev->sdev) { in ata_scsi_offline_dev()
4548 scsi_device_set_state(dev->sdev, SDEV_OFFLINE); in ata_scsi_offline_dev()
4555 * ata_scsi_remove_dev - remove attached SCSI device
4566 struct ata_port *ap = dev->link->ap; in ata_scsi_remove_dev()
4571 * state doesn't change underneath us and thus in ata_scsi_remove_dev()
4574 * increments reference counts regardless of device state. in ata_scsi_remove_dev()
4576 mutex_lock(&ap->scsi_host->scan_mutex); in ata_scsi_remove_dev()
4577 spin_lock_irqsave(ap->lock, flags); in ata_scsi_remove_dev()
4579 /* clearing dev->sdev is protected by host lock */ in ata_scsi_remove_dev()
4580 sdev = dev->sdev; in ata_scsi_remove_dev()
4581 dev->sdev = NULL; in ata_scsi_remove_dev()
4601 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_remove_dev()
4602 mutex_unlock(&ap->scsi_host->scan_mutex); in ata_scsi_remove_dev()
4606 dev_name(&sdev->sdev_gendev)); in ata_scsi_remove_dev()
4615 struct ata_port *ap = link->ap; in ata_scsi_handle_link_detach()
4621 spin_lock_irqsave(ap->lock, flags); in ata_scsi_handle_link_detach()
4622 if (!(dev->flags & ATA_DFLAG_DETACHED)) { in ata_scsi_handle_link_detach()
4623 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_handle_link_detach()
4627 dev->flags &= ~ATA_DFLAG_DETACHED; in ata_scsi_handle_link_detach()
4628 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_handle_link_detach()
4635 * ata_scsi_media_change_notify - send media change event
4646 if (dev->sdev) in ata_scsi_media_change_notify()
4647 sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE, in ata_scsi_media_change_notify()
4652 * ata_scsi_hotplug - SCSI part of hotplug
4669 if (ap->pflags & ATA_PFLAG_UNLOADING) in ata_scsi_hotplug()
4672 mutex_lock(&ap->scsi_scan_mutex); in ata_scsi_hotplug()
4678 ata_scsi_handle_link_detach(&ap->link); in ata_scsi_hotplug()
4679 if (ap->pmp_link) in ata_scsi_hotplug()
4681 ata_scsi_handle_link_detach(&ap->pmp_link[i]); in ata_scsi_hotplug()
4686 mutex_unlock(&ap->scsi_scan_mutex); in ata_scsi_hotplug()
4690 * ata_scsi_user_scan - indication for user-initiated bus scan
4713 return -EINVAL; in ata_scsi_user_scan()
4717 return -EINVAL; in ata_scsi_user_scan()
4721 return -EINVAL; in ata_scsi_user_scan()
4725 spin_lock_irqsave(ap->lock, flags); in ata_scsi_user_scan()
4731 struct ata_eh_info *ehi = &link->eh_info; in ata_scsi_user_scan()
4732 ehi->probe_mask |= ATA_ALL_DEVICES; in ata_scsi_user_scan()
4733 ehi->action |= ATA_EH_RESET; in ata_scsi_user_scan()
4739 struct ata_eh_info *ehi = &dev->link->eh_info; in ata_scsi_user_scan()
4740 ehi->probe_mask |= 1 << dev->devno; in ata_scsi_user_scan()
4741 ehi->action |= ATA_EH_RESET; in ata_scsi_user_scan()
4743 rc = -EINVAL; in ata_scsi_user_scan()
4748 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_user_scan()
4751 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_user_scan()
4757 * ata_scsi_dev_rescan - initiate scsi_rescan_device()
4776 mutex_lock(&ap->scsi_scan_mutex); in ata_scsi_dev_rescan()
4777 spin_lock_irqsave(ap->lock, flags); in ata_scsi_dev_rescan()
4781 struct scsi_device *sdev = dev->sdev; in ata_scsi_dev_rescan()
4787 if (ap->pflags & ATA_PFLAG_SUSPENDED) in ata_scsi_dev_rescan()
4795 do_resume = dev->flags & ATA_DFLAG_RESUMING; in ata_scsi_dev_rescan()
4797 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_dev_rescan()
4800 if (ret == -EWOULDBLOCK) in ata_scsi_dev_rescan()
4802 dev->flags &= ~ATA_DFLAG_RESUMING; in ata_scsi_dev_rescan()
4806 spin_lock_irqsave(ap->lock, flags); in ata_scsi_dev_rescan()
4814 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_dev_rescan()
4816 mutex_unlock(&ap->scsi_scan_mutex); in ata_scsi_dev_rescan()
4820 schedule_delayed_work(&ap->scsi_rescan_task, in ata_scsi_dev_rescan()