/*
 *  libata-scsi.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from
 *  - http://www.t10.org/
 *  - http://www.t13.org/
 *
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/uaccess.h>

#include "libata.h"

#define SECTOR_SIZE	512

typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);

static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
					const struct scsi_device *scsidev);
static struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
					    const struct scsi_device *scsidev);
static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
			      unsigned int id, unsigned int lun);


#define RW_RECOVERY_MPAGE		0x1
#define RW_RECOVERY_MPAGE_LEN		12
#define CACHE_MPAGE			0x8
#define CACHE_MPAGE_LEN			20
#define CONTROL_MPAGE			0xa
#define CONTROL_MPAGE_LEN		12
#define ALL_MPAGES			0x3f
#define ALL_SUB_MPAGES			0xff


static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = {
	RW_RECOVERY_MPAGE,
	RW_RECOVERY_MPAGE_LEN - 2,
	(1 << 7),	/* AWRE */
	0,		/* read retry count */
	0, 0, 0, 0,
	0,		/* write retry count */
	0, 0, 0
};

static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
	CACHE_MPAGE,
	CACHE_MPAGE_LEN - 2,
	0,		/* contains WCE, needs to be 0 for logic */
	0, 0, 0, 0, 0, 0, 0, 0, 0,
	0,		/* contains DRA, needs to be 0 for logic */
	0, 0, 0, 0, 0, 0, 0
};

static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
	CONTROL_MPAGE,
	CONTROL_MPAGE_LEN - 2,
	2,	/* DSENSE=0, GLTSD=1 */
	0,	/* [QAM+QERR may be 1, see 05-359r1] */
	0, 0, 0, 0, 0xff, 0xff,
	0, 30	/* extended self test time, see 05-359r1 */
};

/*
 * libata transport template.  libata doesn't do real transport stuff.
 * It just needs the eh_timed_out hook.
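 * (The template below also wires up ->eh_strategy_handler and ->user_scan,
 * so error recovery and user-initiated scans are routed through libata.)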
 */
static struct scsi_transport_template ata_scsi_transport_template = {
	.eh_strategy_handler	= ata_scsi_error,
	.eh_timed_out		= ata_scsi_timed_out,
	.user_scan		= ata_scsi_user_scan,
};


static const struct {
	enum link_pm	value;
	const char	*name;
} link_pm_policy[] = {
	{ NOT_AVAILABLE, "max_performance" },
	{ MIN_POWER, "min_power" },
	{ MAX_PERFORMANCE, "max_performance" },
	{ MEDIUM_POWER, "medium_power" },
};

static const char *ata_scsi_lpm_get(enum link_pm policy)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(link_pm_policy); i++)
		if (link_pm_policy[i].value == policy)
			return link_pm_policy[i].name;

	return NULL;
}

static ssize_t ata_scsi_lpm_put(struct class_device *class_dev,
				const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	enum link_pm policy = 0;
	int i;

	/*
	 * we are skipping array location 0 on purpose - this
	 * is because a value of NOT_AVAILABLE is displayed
	 * to the user as max_performance, but when the user
	 * writes "max_performance", they actually want the
	 * value to match MAX_PERFORMANCE.
	 */
	for (i = 1; i < ARRAY_SIZE(link_pm_policy); i++) {
		const int len = strlen(link_pm_policy[i].name);
		if (strncmp(link_pm_policy[i].name, buf, len) == 0 &&
		    buf[len] == '\n') {
			policy = link_pm_policy[i].value;
			break;
		}
	}
	if (!policy)
		return -EINVAL;

	ata_lpm_schedule(ap, policy);
	return count;
}

static ssize_t
ata_scsi_lpm_show(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	const char *policy =
		ata_scsi_lpm_get(ap->pm_policy);

	if (!policy)
		return -EINVAL;

	return snprintf(buf, 23, "%s\n", policy);
}
CLASS_DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
		  ata_scsi_lpm_show, ata_scsi_lpm_put);
EXPORT_SYMBOL_GPL(class_device_attr_link_power_management_policy);

static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
				   void (*done)(struct scsi_cmnd *))
{
	ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
	/* "Invalid field in cdb" */
	done(cmd);
}

/**
 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
 * @sdev: SCSI device for which BIOS geometry is to be determined
 * @bdev: block device associated with @sdev
 * @capacity: capacity of SCSI device
 * @geom: location to which geometry will be output
 *
 * Generic bios head/sector/cylinder calculator
 * used by sd.  Most BIOSes nowadays expect a XXX/255/16 (CHS)
 * mapping.  Some situations may arise where the disk is not
 * bootable if this is not used.
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
 *
 * RETURNS:
 * Zero.
 */
int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
		       sector_t capacity, int geom[])
{
	geom[0] = 255;
	geom[1] = 63;
	sector_div(capacity, 255*63);
	geom[2] = capacity;

	return 0;
}

/**
 * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
 * @sdev: SCSI device to get identify data for
 * @arg: User buffer area for identify data
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
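 *
 * Note: the user buffer receives the raw 256-word IDENTIFY data; the
 * model, firmware revision and serial number fields are then rewritten
 * as byte-swapped ASCII strings (see the ata_id_string() calls below).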
 *
 * RETURNS:
 * Zero on success, negative errno on error.
 */
static int ata_get_identity(struct scsi_device *sdev, void __user *arg)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
	u16 __user *dst = arg;
	char buf[40];

	if (!dev)
		return -ENOMSG;

	if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16)))
		return -EFAULT;

	ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN);
	if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN))
		return -EFAULT;

	ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN);
	if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN))
		return -EFAULT;

	ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN);
	if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN))
		return -EFAULT;

	return 0;
}

/**
 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
 * @scsidev: Device to which we are issuing command
 * @arg: User provided data for issuing command
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
 *
 * RETURNS:
 * Zero on success, negative errno on error.
 */
int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
{
	int rc = 0;
	u8 scsi_cmd[MAX_COMMAND_SIZE];
	u8 args[4], *argbuf = NULL, *sensebuf = NULL;
	int argsize = 0;
	enum dma_data_direction data_dir;
	int cmd_result;

	if (arg == NULL)
		return -EINVAL;

	if (copy_from_user(args, arg, sizeof(args)))
		return -EFAULT;

	sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
	if (!sensebuf)
		return -ENOMEM;

	memset(scsi_cmd, 0, sizeof(scsi_cmd));

	if (args[3]) {
		argsize = SECTOR_SIZE * args[3];
		argbuf = kmalloc(argsize, GFP_KERNEL);
		if (argbuf == NULL) {
			rc = -ENOMEM;
			goto error;
		}

		scsi_cmd[1]  = (4 << 1);	/* PIO Data-in */
		scsi_cmd[2]  = 0x0e;		/* no off.line or cc, read from dev,
						   block count in sector count field */
		data_dir = DMA_FROM_DEVICE;
	} else {
		scsi_cmd[1]  = (3 << 1);	/* Non-data */
		scsi_cmd[2]  = 0x20;		/* cc but no off.line or data xfer */
		data_dir = DMA_NONE;
	}

	scsi_cmd[0] = ATA_16;

	scsi_cmd[4] = args[2];
	if (args[0] == WIN_SMART) { /* hack -- ide driver does this too... */
		scsi_cmd[6]  = args[3];
		scsi_cmd[8]  = args[1];
		scsi_cmd[10] = 0x4f;
		scsi_cmd[12] = 0xc2;
	} else {
		scsi_cmd[6]  = args[1];
	}
	scsi_cmd[14] = args[0];

	/* Good values for timeout and retries?  Values below
	   from scsi_ioctl_send_command() for default case... */
	cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
				  sensebuf, (10*HZ), 5, 0);

	if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
		u8 *desc = sensebuf + 8;
		cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */

		/* If we set cc then ATA pass-through will cause a
		 * check condition even if no error. Filter that.
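		 * (CK_COND is only set for the non-data branch above, so a
		 * successful command may still come back as CHECK CONDITION
		 * with an all-zero sense key/asc/ascq, which we clear here.)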
		 */
		if (cmd_result & SAM_STAT_CHECK_CONDITION) {
			struct scsi_sense_hdr sshdr;
			scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
					     &sshdr);
			if (sshdr.sense_key == 0 &&
			    sshdr.asc == 0 && sshdr.ascq == 0)
				cmd_result &= ~SAM_STAT_CHECK_CONDITION;
		}

		/* Send userspace a few ATA registers (same as drivers/ide) */
		if (sensebuf[0] == 0x72 &&	/* format is "descriptor" */
		    desc[0] == 0x09) {		/* code is "ATA Descriptor" */
			args[0] = desc[13];	/* status */
			args[1] = desc[3];	/* error */
			args[2] = desc[5];	/* sector count (0:7) */
			if (copy_to_user(arg, args, sizeof(args)))
				rc = -EFAULT;
		}
	}


	if (cmd_result) {
		rc = -EIO;
		goto error;
	}

	if ((argbuf)
	 && copy_to_user(arg + sizeof(args), argbuf, argsize))
		rc = -EFAULT;
error:
	kfree(sensebuf);
	kfree(argbuf);
	return rc;
}

/**
 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
 * @scsidev: Device to which we are issuing command
 * @arg: User provided data for issuing command
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
 *
 * RETURNS:
 * Zero on success, negative errno on error.
 */
int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
{
	int rc = 0;
	u8 scsi_cmd[MAX_COMMAND_SIZE];
	u8 args[7], *sensebuf = NULL;
	int cmd_result;

	if (arg == NULL)
		return -EINVAL;

	if (copy_from_user(args, arg, sizeof(args)))
		return -EFAULT;

	sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
	if (!sensebuf)
		return -ENOMEM;

	memset(scsi_cmd, 0, sizeof(scsi_cmd));
	scsi_cmd[0]  = ATA_16;
	scsi_cmd[1]  = (3 << 1);	/* Non-data */
	scsi_cmd[2]  = 0x20;		/* cc but no off.line or data xfer */
	scsi_cmd[4]  = args[1];
	scsi_cmd[6]  = args[2];
	scsi_cmd[8]  = args[3];
	scsi_cmd[10] = args[4];
	scsi_cmd[12] = args[5];
	scsi_cmd[13] = args[6] & 0x4f;
	scsi_cmd[14] = args[0];

	/* Good values for timeout and retries?  Values below
	   from scsi_ioctl_send_command() for default case... */
	cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
				  sensebuf, (10*HZ), 5, 0);

	if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
		u8 *desc = sensebuf + 8;
		cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */

		/* If we set cc then ATA pass-through will cause a
		 * check condition even if no error. Filter that.
		 */
		if (cmd_result & SAM_STAT_CHECK_CONDITION) {
			struct scsi_sense_hdr sshdr;
			scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
					     &sshdr);
			if (sshdr.sense_key == 0 &&
			    sshdr.asc == 0 && sshdr.ascq == 0)
				cmd_result &= ~SAM_STAT_CHECK_CONDITION;
		}

		/* Send userspace ATA registers */
		if (sensebuf[0] == 0x72 &&	/* format is "descriptor" */
		    desc[0] == 0x09) {		/* code is "ATA Descriptor" */
			args[0] = desc[13];	/* status */
			args[1] = desc[3];	/* error */
			args[2] = desc[5];	/* sector count (0:7) */
			args[3] = desc[7];	/* lbal */
			args[4] = desc[9];	/* lbam */
			args[5] = desc[11];	/* lbah */
			args[6] = desc[12];	/* select */
			if (copy_to_user(arg, args, sizeof(args)))
				rc = -EFAULT;
		}
	}

	if (cmd_result) {
		rc = -EIO;
		goto error;
	}

error:
	kfree(sensebuf);
	return rc;
}

int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
{
	int val = -EINVAL, rc = -EINVAL;

	switch (cmd) {
	case ATA_IOC_GET_IO32:
		val = 0;
		if (copy_to_user(arg, &val, 1))
			return -EFAULT;
		return 0;

	case ATA_IOC_SET_IO32:
		val = (unsigned long) arg;
		if (val != 0)
			return -EINVAL;
		return 0;

	case HDIO_GET_IDENTITY:
		return ata_get_identity(scsidev, arg);

	case HDIO_DRIVE_CMD:
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return ata_cmd_ioctl(scsidev, arg);

	case HDIO_DRIVE_TASK:
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return ata_task_ioctl(scsidev, arg);

	default:
		rc = -ENOTTY;
		break;
	}

	return rc;
}

/**
 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
 * @dev: ATA device to which the new command is attached
 * @cmd: SCSI command that originated this ATA command
 * @done: SCSI command completion function
 *
 * Obtain a reference to an unused ata_queued_cmd structure,
 * which is the basic libata structure representing a single
 * ATA command sent to the hardware.
 *
 * If a command was available, fill in the SCSI-specific
 * portions of the structure with information on the
 * current command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Command allocated, or %NULL if none available.
 */
static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
					      struct scsi_cmnd *cmd,
					      void (*done)(struct scsi_cmnd *))
{
	struct ata_queued_cmd *qc;

	qc = ata_qc_new_init(dev);
	if (qc) {
		qc->scsicmd = cmd;
		qc->scsidone = done;

		qc->__sg = scsi_sglist(cmd);
		qc->n_elem = scsi_sg_count(cmd);
	} else {
		cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
		done(cmd);
	}

	return qc;
}

/**
 * ata_dump_status - user friendly display of error info
 * @id: id of the port in question
 * @tf: ptr to filled out taskfile
 *
 * Decode and dump the ATA error/status registers for the user so
 * that they have some idea what really happened at the non
 * make-believe layer.
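 *
 * Example output (illustrative only):
 *   ata1: status=0x51 { DriveReady SeekComplete Error }
 *   ata1: error=0x04 { DriveStatusError }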
 *
 * LOCKING:
 * inherited from caller
 */
static void ata_dump_status(unsigned id, struct ata_taskfile *tf)
{
	u8 stat = tf->command, err = tf->feature;

	printk(KERN_WARNING "ata%u: status=0x%02x { ", id, stat);
	if (stat & ATA_BUSY) {
		printk("Busy }\n");	/* Data is not valid in this case */
	} else {
		if (stat & 0x40)	printk("DriveReady ");
		if (stat & 0x20)	printk("DeviceFault ");
		if (stat & 0x10)	printk("SeekComplete ");
		if (stat & 0x08)	printk("DataRequest ");
		if (stat & 0x04)	printk("CorrectedError ");
		if (stat & 0x02)	printk("Index ");
		if (stat & 0x01)	printk("Error ");
		printk("}\n");

		if (err) {
			printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err);
			if (err & 0x04)		printk("DriveStatusError ");
			if (err & 0x80) {
				if (err & 0x04)	printk("BadCRC ");
				else		printk("Sector ");
			}
			if (err & 0x40)		printk("UncorrectableError ");
			if (err & 0x10)		printk("SectorIdNotFound ");
			if (err & 0x02)		printk("TrackZeroNotFound ");
			if (err & 0x01)		printk("AddrMarkNotFound ");
			printk("}\n");
		}
	}
}

/**
 * ata_to_sense_error - convert ATA error to SCSI error
 * @id: ATA device number
 * @drv_stat: value contained in ATA status register
 * @drv_err: value contained in ATA error register
 * @sk: the sense key we'll fill out
 * @asc: the additional sense code we'll fill out
 * @ascq: the additional sense code qualifier we'll fill out
 * @verbose: be verbose
 *
 * Converts an ATA error into a SCSI error.  Fill out pointers to
 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor
 * format sense blocks.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
			       u8 *asc, u8 *ascq, int verbose)
{
	int i;

	/* Based on the 3ware driver translation table */
	static const unsigned char sense_table[][4] = {
		/* BBD|ECC|ID|MAR */
		{0xd1,	ABORTED_COMMAND, 0x00, 0x00},	// Device busy		Aborted command
		/* BBD|ECC|ID */
		{0xd0,	ABORTED_COMMAND, 0x00, 0x00},	// Device busy		Aborted command
		/* ECC|MC|MARK */
		{0x61,	HARDWARE_ERROR,  0x00, 0x00},	// Device fault		Hardware error
		/* ICRC|ABRT */		/* NB: ICRC & !ABRT is BBD */
		{0x84,	ABORTED_COMMAND, 0x47, 0x00},	// Data CRC error	SCSI parity error
		/* MC|ID|ABRT|TRK0|MARK */
		{0x37,	NOT_READY,       0x04, 0x00},	// Unit offline		Not ready
		/* MCR|MARK */
		{0x09,	NOT_READY,       0x04, 0x00},	// Unrecovered disk error  Not ready
		/* Bad address mark */
		{0x01,	MEDIUM_ERROR,    0x13, 0x00},	// Address mark not found  Address mark not found for data field
		/* TRK0 */
		{0x02,	HARDWARE_ERROR,  0x00, 0x00},	// Track 0 not found	Hardware error
		/* Abort & !ICRC */
		{0x04,	ABORTED_COMMAND, 0x00, 0x00},	// Aborted command	Aborted command
		/* Media change request */
		{0x08,	NOT_READY,       0x04, 0x00},	// Media change request	FIXME: faking offline
		/* SRV */
		{0x10,	ABORTED_COMMAND, 0x14, 0x00},	// ID not found		Recorded entity not found
		/* Media change */
		{0x08,	NOT_READY,       0x04, 0x00},	// Media change		FIXME: faking offline
		/* ECC */
		{0x40,	MEDIUM_ERROR,    0x11, 0x04},	// Uncorrectable ECC error  Unrecovered read error
		/* BBD - block marked bad */
		{0x80,	MEDIUM_ERROR,    0x11, 0x04},	// Block marked bad	Medium error, unrecovered read error
		{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
	};
	static const unsigned char stat_table[][4] = {
		/*
		 * Must be first because BUSY means no other bits valid */
		{0x80,	ABORTED_COMMAND, 0x47, 0x00},	// Busy, fake parity for now
		{0x20,	HARDWARE_ERROR,  0x00, 0x00},	// Device fault
		{0x08,	ABORTED_COMMAND, 0x47, 0x00},	// Timed out in xfer, fake parity for now
		{0x04,	RECOVERED_ERROR, 0x11, 0x00},	// Recovered ECC error	Medium error, recovered
		{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
	};

	/*
	 * Is this an error we can process/parse
	 */
	if (drv_stat & ATA_BUSY) {
		drv_err = 0;	/* Ignore the err bits, they're invalid */
	}

	if (drv_err) {
		/* Look for drv_err */
		for (i = 0; sense_table[i][0] != 0xFF; i++) {
			/* Look for best matches first */
			if ((sense_table[i][0] & drv_err) ==
			    sense_table[i][0]) {
				*sk = sense_table[i][1];
				*asc = sense_table[i][2];
				*ascq = sense_table[i][3];
				goto translate_done;
			}
		}
		/* No immediate match */
		if (verbose)
			printk(KERN_WARNING "ata%u: no sense translation for "
			       "error 0x%02x\n", id, drv_err);
	}

	/* Fall back to interpreting status bits */
	for (i = 0; stat_table[i][0] != 0xFF; i++) {
		if (stat_table[i][0] & drv_stat) {
			*sk = stat_table[i][1];
			*asc = stat_table[i][2];
			*ascq = stat_table[i][3];
			goto translate_done;
		}
	}
	/* No error?  Undecoded? */
	if (verbose)
		printk(KERN_WARNING "ata%u: no sense translation for "
		       "status: 0x%02x\n", id, drv_stat);

	/* We need a sensible error return here, which is tricky, and one
	   that won't cause people to do things like return a disk wrongly */
	*sk = ABORTED_COMMAND;
	*asc = 0x00;
	*ascq = 0x00;

translate_done:
	if (verbose)
		printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
		       "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
		       id, drv_stat, drv_err, *sk, *asc, *ascq);
	return;
}

/*
 * ata_gen_passthru_sense - Generate check condition sense block.
 * @qc: Command that completed.
 *
 * This function is specific to the ATA descriptor format sense
 * block specified for the ATA pass through commands.  Regardless
 * of whether the command errored or not, return a sense
 * block.  Copy all controller registers into the sense
 * block.  Clear sense key, ASC & ASCQ if there is no error.
 *
 * LOCKING:
 * None.
 */
static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *cmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->result_tf;
	unsigned char *sb = cmd->sense_buffer;
	unsigned char *desc = sb + 8;
	int verbose = qc->ap->ops->error_handler == NULL;

	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);

	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

	/*
	 * Use ata_to_sense_error() to map status register bits
	 * onto sense key, asc & ascq.
	 */
	if (qc->err_mask ||
	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
		ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
				   &sb[1], &sb[2], &sb[3], verbose);
		sb[1] &= 0x0f;
	}

	/*
	 * Sense data is current and format is descriptor.
	 */
	sb[0] = 0x72;

	desc[0] = 0x09;

	/* set length of additional sense data */
	sb[7] = 14;
	desc[1] = 12;

	/*
	 * Copy registers into sense buffer.
	 */
	desc[2] = 0x00;
	desc[3] = tf->feature;	/* == error reg */
	desc[5] = tf->nsect;
	desc[7] = tf->lbal;
	desc[9] = tf->lbam;
	desc[11] = tf->lbah;
	desc[12] = tf->device;
	desc[13] = tf->command; /* == status reg */

	/*
	 * Fill in Extend bit, and the high order bytes
	 * if applicable.
	 */
	if (tf->flags & ATA_TFLAG_LBA48) {
		desc[2] |= 0x01;
		desc[4] = tf->hob_nsect;
		desc[6] = tf->hob_lbal;
		desc[8] = tf->hob_lbam;
		desc[10] = tf->hob_lbah;
	}
}

/**
 * ata_gen_ata_sense - generate a SCSI fixed sense block
 * @qc: Command that we are erroring out
 *
 * Generate sense block for a failed ATA command @qc.  Descriptor
 * format is used to accommodate LBA48 block address.
 *
 * LOCKING:
 * None.
 */
static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;
	struct scsi_cmnd *cmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->result_tf;
	unsigned char *sb = cmd->sense_buffer;
	unsigned char *desc = sb + 8;
	int verbose = qc->ap->ops->error_handler == NULL;
	u64 block;

	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);

	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

	/* sense data is current and format is descriptor */
	sb[0] = 0x72;

	/* Use ata_to_sense_error() to map status register bits
	 * onto sense key, asc & ascq.
	 */
	if (qc->err_mask ||
	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
		ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
				   &sb[1], &sb[2], &sb[3], verbose);
		sb[1] &= 0x0f;
	}

	block = ata_tf_read_block(&qc->result_tf, dev);

	/* information sense data descriptor */
	sb[7] = 12;
	desc[0] = 0x00;
	desc[1] = 10;

	desc[2] |= 0x80;	/* valid */
	desc[6] = block >> 40;
	desc[7] = block >> 32;
	desc[8] = block >> 24;
	desc[9] = block >> 16;
	desc[10] = block >> 8;
	desc[11] = block;
}

static void ata_scsi_sdev_config(struct scsi_device *sdev)
{
	sdev->use_10_for_rw = 1;
	sdev->use_10_for_ms = 1;

	/* Schedule policy is determined by ->qc_defer() callback and
	 * it needs to see every deferred qc.  Set dev_blocked to 1 to
	 * prevent SCSI midlayer from automatically deferring
	 * requests.
	 */
	sdev->max_device_blocked = 1;
}

static void ata_scsi_dev_config(struct scsi_device *sdev,
				struct ata_device *dev)
{
	/* configure max sectors */
	blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);

	/* SATA DMA transfers must be multiples of 4 byte, so
	 * we need to pad ATAPI transfers using an extra sg.
	 * Decrement max hw segments accordingly.
	 */
	if (dev->class == ATA_DEV_ATAPI) {
		struct request_queue *q = sdev->request_queue;
		blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
	}

	if (dev->flags & ATA_DFLAG_AN)
		set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);

	if (dev->flags & ATA_DFLAG_NCQ) {
		int depth;

		depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
		depth = min(ATA_MAX_QUEUE - 1, depth);
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
	}
}

/**
 * ata_scsi_slave_config - Set SCSI device attributes
 * @sdev: SCSI device to examine
 *
 * This is called before we actually start reading
 * and writing to the device, to configure certain
 * SCSI mid-layer behaviors.
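 * In particular we force 10-byte READ/WRITE and MODE SENSE, cap the
 * request size at the device's max_sectors, and size the NCQ queue
 * depth from the IDENTIFY data (see the helpers above).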
 *
 * LOCKING:
 * Defined by SCSI layer.  We don't really care.
 */

int ata_scsi_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);

	ata_scsi_sdev_config(sdev);

	sdev->manage_start_stop = 1;

	if (dev)
		ata_scsi_dev_config(sdev, dev);

	return 0;	/* scsi layer doesn't check return value, sigh */
}

/**
 * ata_scsi_slave_destroy - SCSI device is about to be destroyed
 * @sdev: SCSI device to be destroyed
 *
 * @sdev is about to be destroyed for hot/warm unplugging.  If
 * this unplugging was initiated by libata as indicated by NULL
 * dev->sdev, this function doesn't have to do anything.
 * Otherwise, SCSI layer initiated warm-unplug is in progress.
 * Clear dev->sdev, schedule the device for ATA detach and invoke
 * EH.
 *
 * LOCKING:
 * Defined by SCSI layer.  We don't really care.
 */
void ata_scsi_slave_destroy(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	unsigned long flags;
	struct ata_device *dev;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	dev = __ata_scsi_find_dev(ap, sdev);
	if (dev && dev->sdev) {
		/* SCSI device already in CANCEL state, no need to offline it */
		dev->sdev = NULL;
		dev->flags |= ATA_DFLAG_DETACH;
		ata_port_schedule_eh(ap);
	}
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_scsi_change_queue_depth - SCSI callback for queue depth config
 * @sdev: SCSI device to configure queue depth for
 * @queue_depth: new queue depth
 *
 * This is libata standard hostt->change_queue_depth callback.
 * SCSI will call into this callback when user tries to set queue
 * depth via sysfs.
 *
 * LOCKING:
 * SCSI layer (we don't care)
 *
 * RETURNS:
 * Newly configured queue depth.
 */
int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev;
	unsigned long flags;

	if (queue_depth < 1 || queue_depth == sdev->queue_depth)
		return sdev->queue_depth;

	dev = ata_scsi_find_dev(ap, sdev);
	if (!dev || !ata_dev_enabled(dev))
		return sdev->queue_depth;

	/* NCQ enabled?
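	 * A depth of 1, or a device without NCQ, turns NCQ off via
	 * ATA_DFLAG_NCQ_OFF; larger values are clamped below to what the
	 * host and the drive's IDENTIFY queue depth can support.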
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_NCQ_OFF;
	if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		queue_depth = 1;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	/* limit and apply queue depth */
	queue_depth = min(queue_depth, sdev->host->can_queue);
	queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
	queue_depth = min(queue_depth, ATA_MAX_QUEUE - 1);

	if (sdev->queue_depth == queue_depth)
		return -EINVAL;

	scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
	return queue_depth;
}

/* XXX: for spindown warning */
static void ata_delayed_done_timerfn(unsigned long arg)
{
	struct scsi_cmnd *scmd = (void *)arg;

	scmd->scsi_done(scmd);
}

/* XXX: for spindown warning */
static void ata_delayed_done(struct scsi_cmnd *scmd)
{
	static struct timer_list timer;

	setup_timer(&timer, ata_delayed_done_timerfn, (unsigned long)scmd);
	mod_timer(&timer, jiffies + 5 * HZ);
}

/**
 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
 * @qc: Storage for translated ATA taskfile
 *
 * Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
 * (to start).  Perhaps these commands should be preceded by
 * CHECK POWER MODE to see what power mode the device is already in.
 * [See SAT revision 5 at www.t10.org]
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->tf;
	const u8 *cdb = scmd->cmnd;

	if (scmd->cmd_len < 5)
		goto invalid_fld;

	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf->protocol = ATA_PROT_NODATA;
	if (cdb[1] & 0x1) {
		;	/* ignore IMMED bit, violates sat-r05 */
	}
	if (cdb[4] & 0x2)
		goto invalid_fld;	/* LOEJ bit set not supported */
	if (((cdb[4] >> 4) & 0xf) != 0)
		goto invalid_fld;	/* power conditions not supported */

	if (qc->dev->horkage & ATA_HORKAGE_SKIP_PM) {
		/* the device lacks PM support, finish without doing anything */
		scmd->result = SAM_STAT_GOOD;
		return 1;
	}

	if (cdb[4] & 0x1) {
		tf->nsect = 1;	/* 1 sector, lba=0 */

		if (qc->dev->flags & ATA_DFLAG_LBA) {
			tf->flags |= ATA_TFLAG_LBA;

			tf->lbah = 0x0;
			tf->lbam = 0x0;
			tf->lbal = 0x0;
			tf->device |= ATA_LBA;
		} else {
			/* CHS */
			tf->lbal = 0x1; /* sect */
			tf->lbam = 0x0; /* cyl low */
			tf->lbah = 0x0; /* cyl high */
		}

		tf->command = ATA_CMD_VERIFY;	/* READ VERIFY */
	} else {
		/* XXX: This is for backward compatibility, will be
		 * removed.  Read Documentation/feature-removal-schedule.txt
		 * for more info.
		 */
		if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) &&
		    (system_state == SYSTEM_HALT ||
		     system_state == SYSTEM_POWER_OFF)) {
			static unsigned long warned;

			if (!test_and_set_bit(0, &warned)) {
				ata_dev_printk(qc->dev, KERN_WARNING,
					"DISK MIGHT NOT BE SPUN DOWN PROPERLY. "
					"UPDATE SHUTDOWN UTILITY\n");
				ata_dev_printk(qc->dev, KERN_WARNING,
					"For more info, visit "
					"http://linux-ata.org/shutdown.html\n");

				/* ->scsi_done is not used, use it for
				 * delayed completion.
				 */
				scmd->scsi_done = qc->scsidone;
				qc->scsidone = ata_delayed_done;
			}
			scmd->result = SAM_STAT_GOOD;
			return 1;
		}

		/* Issue ATA STANDBY IMMEDIATE command */
		tf->command = ATA_CMD_STANDBYNOW1;
	}

	/*
	 * Standby and Idle condition timers could be implemented but that
	 * would require libata to implement the Power condition mode page
	 * and allow the user to change it.  Changing mode pages requires
	 * MODE SELECT to be implemented.
	 */

	return 0;

invalid_fld:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
	/* "Invalid field in cdb" */
	return 1;
}


/**
 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
 * @qc: Storage for translated ATA taskfile
 *
 * Sets up an ATA taskfile to issue FLUSH CACHE or
 * FLUSH CACHE EXT.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;

	tf->flags |= ATA_TFLAG_DEVICE;
	tf->protocol = ATA_PROT_NODATA;

	if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
		tf->command = ATA_CMD_FLUSH_EXT;
	else
		tf->command = ATA_CMD_FLUSH;

	/* flush is critical for IO integrity, consider it an IO command */
	qc->flags |= ATA_QCFLAG_IO;

	return 0;
}

/**
 * scsi_6_lba_len - Get LBA and transfer length
 * @cdb: SCSI command to translate
 *
 * Calculate LBA and transfer length for 6-byte commands.
 *
 * RETURNS:
 * @plba: the LBA
 * @plen: the transfer length
 */
static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
	u64 lba = 0;
	u32 len;

	VPRINTK("six-byte command\n");

	lba |= ((u64)(cdb[1] & 0x1f)) << 16;
	lba |= ((u64)cdb[2]) << 8;
	lba |= ((u64)cdb[3]);

	len = cdb[4];

	*plba = lba;
	*plen = len;
}

/**
 * scsi_10_lba_len - Get LBA and transfer length
 * @cdb: SCSI command to translate
 *
 * Calculate LBA and transfer length for 10-byte commands.
 *
 * RETURNS:
 * @plba: the LBA
 * @plen: the transfer length
 */
static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
	u64 lba = 0;
	u32 len = 0;

	VPRINTK("ten-byte command\n");

	lba |= ((u64)cdb[2]) << 24;
	lba |= ((u64)cdb[3]) << 16;
	lba |= ((u64)cdb[4]) << 8;
	lba |= ((u64)cdb[5]);

	len |= ((u32)cdb[7]) << 8;
	len |= ((u32)cdb[8]);

	*plba = lba;
	*plen = len;
}

/**
 * scsi_16_lba_len - Get LBA and transfer length
 * @cdb: SCSI command to translate
 *
 * Calculate LBA and transfer length for 16-byte commands.
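 *
 * Example: a READ(16) CDB whose LBA bytes 2-9 are 00 00 00 00 00 00 00 01
 * and whose length bytes 10-13 are 00 00 00 08 decodes to LBA 1, length 8.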
 *
 * RETURNS:
 * @plba: the LBA
 * @plen: the transfer length
 */
static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
	u64 lba = 0;
	u32 len = 0;

	VPRINTK("sixteen-byte command\n");

	lba |= ((u64)cdb[2]) << 56;
	lba |= ((u64)cdb[3]) << 48;
	lba |= ((u64)cdb[4]) << 40;
	lba |= ((u64)cdb[5]) << 32;
	lba |= ((u64)cdb[6]) << 24;
	lba |= ((u64)cdb[7]) << 16;
	lba |= ((u64)cdb[8]) << 8;
	lba |= ((u64)cdb[9]);

	len |= ((u32)cdb[10]) << 24;
	len |= ((u32)cdb[11]) << 16;
	len |= ((u32)cdb[12]) << 8;
	len |= ((u32)cdb[13]);

	*plba = lba;
	*plen = len;
}

/**
 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
 * @qc: Storage for translated ATA taskfile
 *
 * Converts SCSI VERIFY command to an ATA READ VERIFY command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u64 dev_sectors = qc->dev->n_sectors;
	const u8 *cdb = scmd->cmnd;
	u64 block;
	u32 n_block;

	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->protocol = ATA_PROT_NODATA;

	if (cdb[0] == VERIFY) {
		if (scmd->cmd_len < 10)
			goto invalid_fld;
		scsi_10_lba_len(cdb, &block, &n_block);
	} else if (cdb[0] == VERIFY_16) {
		if (scmd->cmd_len < 16)
			goto invalid_fld;
		scsi_16_lba_len(cdb, &block, &n_block);
	} else
		goto invalid_fld;

	if (!n_block)
		goto nothing_to_do;
	if (block >= dev_sectors)
		goto out_of_range;
	if ((block + n_block) > dev_sectors)
		goto out_of_range;

	if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->command = ATA_CMD_VERIFY;
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				goto out_of_range;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;
			tf->command = ATA_CMD_VERIFY_EXT;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			goto out_of_range;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		if (!lba_28_ok(block, n_block))
			goto out_of_range;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			goto out_of_range;

		tf->command = ATA_CMD_VERIFY;
		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;

invalid_fld:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
	/* "Invalid field in cdb" */
	return 1;

out_of_range:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
	/* "Logical Block Address out of range" */
	return 1;

nothing_to_do:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}

/**
 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
 * @qc: Storage for translated ATA taskfile
 *
 * Converts any of six SCSI read/write commands into the
 * ATA counterpart, including starting sector (LBA),
 * sector count, and taking into account the device's LBA48
 * support.
 *
 * Commands %READ_6, %READ_10, %READ_16, %WRITE_6, %WRITE_10, and
 * %WRITE_16 are currently supported.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	const u8 *cdb = scmd->cmnd;
	unsigned int tf_flags = 0;
	u64 block;
	u32 n_block;
	int rc;

	if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16)
		tf_flags |= ATA_TFLAG_WRITE;

	/* Calculate the SCSI LBA, transfer length and FUA. */
	switch (cdb[0]) {
	case READ_10:
	case WRITE_10:
		if (unlikely(scmd->cmd_len < 10))
			goto invalid_fld;
		scsi_10_lba_len(cdb, &block, &n_block);
		if (unlikely(cdb[1] & (1 << 3)))
			tf_flags |= ATA_TFLAG_FUA;
		break;
	case READ_6:
	case WRITE_6:
		if (unlikely(scmd->cmd_len < 6))
			goto invalid_fld;
		scsi_6_lba_len(cdb, &block, &n_block);

		/* for 6-byte r/w commands, transfer length 0
		 * means 256 blocks of data, not 0 block.
		 */
		if (!n_block)
			n_block = 256;
		break;
	case READ_16:
	case WRITE_16:
		if (unlikely(scmd->cmd_len < 16))
			goto invalid_fld;
		scsi_16_lba_len(cdb, &block, &n_block);
		if (unlikely(cdb[1] & (1 << 3)))
			tf_flags |= ATA_TFLAG_FUA;
		break;
	default:
		DPRINTK("no-byte command\n");
		goto invalid_fld;
	}

	/* Check and compose ATA command */
	if (!n_block)
		/* For 10-byte and 16-byte SCSI R/W commands, transfer
		 * length 0 means transfer 0 block of data.
		 * However, for ATA R/W commands, sector count 0 means
		 * 256 or 65536 sectors, not 0 sectors as in SCSI.
		 *
		 * WARNING: one or two older ATA drives treat 0 as 0...
		 */
		goto nothing_to_do;

	qc->flags |= ATA_QCFLAG_IO;
	qc->nbytes = n_block * ATA_SECT_SIZE;

	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
			     qc->tag);
	if (likely(rc == 0))
		return 0;

	if (rc == -ERANGE)
		goto out_of_range;
	/* treat all other errors as -EINVAL, fall through */
invalid_fld:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
	/* "Invalid field in cdb" */
	return 1;

out_of_range:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
	/* "Logical Block Address out of range" */
	return 1;

nothing_to_do:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}

static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *cmd = qc->scsicmd;
	u8 *cdb = cmd->cmnd;
	int need_sense = (qc->err_mask != 0);

	/* For ATA pass thru (SAT) commands, generate a sense block if
	 * user mandated it or if there's an error.  Note that if we
	 * generate because the user forced us to, a check condition
	 * is generated and the ATA register values are returned
	 * whether the command completed successfully or not.  If there
	 * was no error, SK, ASC and ASCQ will all be zero.
	 */
	if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
	    ((cdb[2] & 0x20) || need_sense)) {
		ata_gen_passthru_sense(qc);
	} else {
		if (!need_sense) {
			cmd->result = SAM_STAT_GOOD;
		} else {
			/* TODO: decide which descriptor format to use
			 * for 48b LBA devices and call that here
			 * instead of the fixed desc, which is only
			 * good for smaller LBA (and maybe CHS?)
			 * devices.
			 */
			ata_gen_ata_sense(qc);
		}
	}

	/* XXX: track spindown state for spindown skipping and warning */
	if (unlikely(qc->tf.command == ATA_CMD_STANDBY ||
		     qc->tf.command == ATA_CMD_STANDBYNOW1))
		qc->dev->flags |= ATA_DFLAG_SPUNDOWN;
	else if (likely(system_state != SYSTEM_HALT &&
			system_state != SYSTEM_POWER_OFF))
		qc->dev->flags &= ~ATA_DFLAG_SPUNDOWN;

	if (need_sense && !ap->ops->error_handler)
		ata_dump_status(ap->print_id, &qc->result_tf);

	qc->scsidone(cmd);

	ata_qc_free(qc);
}

/**
 * ata_scsi_translate - Translate then issue SCSI command to ATA device
 * @dev: ATA device to which the command is addressed
 * @cmd: SCSI command to execute
 * @done: SCSI command completion function
 * @xlat_func: Actor which translates @cmd to an ATA taskfile
 *
 * Our ->queuecommand() function has decided that the SCSI
 * command issued can be directly translated into an ATA
 * command, rather than handled internally.
 *
 * This function sets up an ata_queued_cmd structure for the
 * SCSI command, and sends that ata_queued_cmd to the hardware.
 *
 * The xlat_func argument (actor) returns 0 if ready to execute
 * ATA command, else 1 to finish translation.  If 1 is returned
 * then cmd->result (and possibly cmd->sense_buffer) are assumed
 * to be set reflecting an error condition or clean (early)
 * termination.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * 0 on success, SCSI_MLQUEUE_DEVICE_BUSY or SCSI_MLQUEUE_HOST_BUSY
 * if the command needs to be deferred.
 */
static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
			      void (*done)(struct scsi_cmnd *),
			      ata_xlat_func_t xlat_func)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;
	int rc;

	VPRINTK("ENTER\n");

	qc = ata_scsi_qc_new(dev, cmd, done);
	if (!qc)
		goto err_mem;

	/* data is present; dma-map it */
	if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
	    cmd->sc_data_direction == DMA_TO_DEVICE) {
		if (unlikely(scsi_bufflen(cmd) < 1)) {
			ata_dev_printk(dev, KERN_WARNING,
				       "WARNING: zero len r/w req\n");
			goto err_did;
		}

		ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));

		qc->dma_dir = cmd->sc_data_direction;
	}

	qc->complete_fn = ata_scsi_qc_complete;

	if (xlat_func(qc))
		goto early_finish;

	if (ap->ops->qc_defer) {
		if ((rc = ap->ops->qc_defer(qc)))
			goto defer;
	}

	/* select device, send command to hardware */
	ata_qc_issue(qc);

	VPRINTK("EXIT\n");
	return 0;

early_finish:
	ata_qc_free(qc);
	qc->scsidone(cmd);
	DPRINTK("EXIT - early finish (good or error)\n");
	return 0;

err_did:
	ata_qc_free(qc);
	cmd->result = (DID_ERROR << 16);
	qc->scsidone(cmd);
err_mem:
	DPRINTK("EXIT - internal\n");
	return 0;

defer:
	ata_qc_free(qc);
	DPRINTK("EXIT - defer\n");
	if (rc == ATA_DEFER_LINK)
		return SCSI_MLQUEUE_DEVICE_BUSY;
	else
		return SCSI_MLQUEUE_HOST_BUSY;
}

/**
 * ata_scsi_rbuf_get - Map response buffer.
 * @cmd: SCSI command containing buffer to be mapped.
 * @buf_out: Pointer to mapped area.
 *
 * Maps buffer contained within SCSI command @cmd.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Length of response buffer.
 */

static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
{
	u8 *buf;
	unsigned int buflen;

	struct scatterlist *sg = scsi_sglist(cmd);

	if (sg) {
		buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
		buflen = sg->length;
	} else {
		buf = NULL;
		buflen = 0;
	}

	*buf_out = buf;
	return buflen;
}

/**
 * ata_scsi_rbuf_put - Unmap response buffer.
 * @cmd: SCSI command containing buffer to be unmapped.
 * @buf: buffer to unmap
 *
 * Unmaps response buffer contained within @cmd.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
{
	struct scatterlist *sg = scsi_sglist(cmd);
	if (sg)
		kunmap_atomic(buf - sg->offset, KM_IRQ0);
}

/**
 * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
 * @args: device IDENTIFY data / SCSI command of interest.
 * @actor: Callback hook for desired SCSI command simulator
 *
 * Takes care of the hard work of simulating a SCSI command...
 * Mapping the response buffer, calling the command's handler,
 * and handling the handler's return value.  This return value
 * indicates whether the handler wishes the SCSI command to be
 * completed successfully (0), or not (in which case cmd->result
 * and sense buffer are assumed to be set).
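 *
 * Typical use (illustrative):
 *	ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);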
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
			unsigned int (*actor) (struct ata_scsi_args *args,
					       u8 *rbuf, unsigned int buflen))
{
	u8 *rbuf;
	unsigned int buflen, rc;
	struct scsi_cmnd *cmd = args->cmd;

	buflen = ata_scsi_rbuf_get(cmd, &rbuf);
	memset(rbuf, 0, buflen);
	rc = actor(args, rbuf, buflen);
	ata_scsi_rbuf_put(cmd, rbuf);

	if (rc == 0)
		cmd->result = SAM_STAT_GOOD;
	args->done(cmd);
}

/**
 * ATA_SCSI_RBUF_SET - helper to set values in SCSI response buffer
 * @idx: byte index into SCSI response buffer
 * @val: value to set
 *
 * To be used by SCSI command simulator functions.  This macro
 * expects two local variables, u8 *rbuf and unsigned int buflen,
 * to be in scope.
 *
 * LOCKING:
 * None.
 */
#define ATA_SCSI_RBUF_SET(idx, val) do { \
		if ((idx) < buflen) rbuf[(idx)] = (u8)(val); \
	} while (0)

/**
 * ata_scsiop_inq_std - Simulate INQUIRY command
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 * @buflen: Response buffer length.
 *
 * Returns standard device identification data associated
 * with non-VPD INQUIRY command output.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
				unsigned int buflen)
{
	u8 hdr[] = {
		TYPE_DISK,
		0,
		0x5,	/* claim SPC-3 version compatibility */
		2,
		95 - 4
	};

	/* set scsi removable (RMB) bit per ata bit */
	if (ata_id_removeable(args->id))
		hdr[1] |= (1 << 7);

	VPRINTK("ENTER\n");

	memcpy(rbuf, hdr, sizeof(hdr));

	if (buflen > 35) {
		memcpy(&rbuf[8], "ATA     ", 8);
		ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
		ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
		if (rbuf[32] == 0 || rbuf[32] == ' ')
			memcpy(&rbuf[32], "n/a ", 4);
	}

	if (buflen > 63) {
		const u8 versions[] = {
			0x60,	/* SAM-3 (no version claimed) */

			0x03,
			0x20,	/* SBC-2 (no version claimed) */

			0x02,
			0x60	/* SPC-3 (no version claimed) */
		};

		memcpy(rbuf + 59, versions, sizeof(versions));
	}

	return 0;
}

/**
 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 * @buflen: Response buffer length.
 *
 * Returns list of inquiry VPD pages available.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
			       unsigned int buflen)
{
	const u8 pages[] = {
		0x00,	/* page 0x00, this page */
		0x80,	/* page 0x80, unit serial no page */
		0x83	/* page 0x83, device ident page */
	};
	rbuf[3] = sizeof(pages);	/* number of supported VPD pages */

	if (buflen > 6)
		memcpy(rbuf + 4, pages, sizeof(pages));

	return 0;
}

/**
 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 * @buflen: Response buffer length.
 *
 * Returns ATA device serial number.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
			       unsigned int buflen)
{
	const u8 hdr[] = {
		0,
		0x80,			/* this page code */
		0,
		ATA_ID_SERNO_LEN,	/* page len */
	};
	memcpy(rbuf, hdr, sizeof(hdr));

	if (buflen > (ATA_ID_SERNO_LEN + 4 - 1))
		ata_id_string(args->id, (unsigned char *) &rbuf[4],
			      ATA_ID_SERNO, ATA_ID_SERNO_LEN);

	return 0;
}

/**
 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 * @buflen: Response buffer length.
 *
 * Yields two logical unit device identification designators:
 *  - vendor specific ASCII containing the ATA serial number
 *  - SAT defined "t10 vendor id based" containing ASCII vendor
 *    name ("ATA     "), model and serial numbers.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
			       unsigned int buflen)
{
	int num;
	const int sat_model_serial_desc_len = 68;

	rbuf[1] = 0x83;			/* this page code */
	num = 4;

	if (buflen > (ATA_ID_SERNO_LEN + num + 3)) {
		/* piv=0, assoc=lu, code_set=ASCII, designator=vendor */
		rbuf[num + 0] = 2;
		rbuf[num + 3] = ATA_ID_SERNO_LEN;
		num += 4;
		ata_id_string(args->id, (unsigned char *) rbuf + num,
			      ATA_ID_SERNO, ATA_ID_SERNO_LEN);
		num += ATA_ID_SERNO_LEN;
	}
	if (buflen > (sat_model_serial_desc_len + num + 3)) {
		/* SAT defined lu model and serial numbers descriptor */
		/* piv=0, assoc=lu, code_set=ASCII, designator=t10 vendor id */
		rbuf[num + 0] = 2;
		rbuf[num + 1] = 1;
		rbuf[num + 3] = sat_model_serial_desc_len;
		num += 4;
		memcpy(rbuf + num, "ATA     ", 8);
		num += 8;
		ata_id_string(args->id, (unsigned char *) rbuf + num,
			      ATA_ID_PROD, ATA_ID_PROD_LEN);
		num += ATA_ID_PROD_LEN;
		ata_id_string(args->id, (unsigned char *) rbuf + num,
			      ATA_ID_SERNO, ATA_ID_SERNO_LEN);
		num += ATA_ID_SERNO_LEN;
	}
	rbuf[3] = num - 4;	/* page len (assume less than 256 bytes) */
	return 0;
}

/**
 * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 * @buflen: Response buffer length.
 *
 * Yields SAT-specified ATA VPD page.
1852 * 1853 * LOCKING: 1854 * spin_lock_irqsave(host lock) 1855 */ 1856 1857 unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf, 1858 unsigned int buflen) 1859 { 1860 u8 pbuf[60]; 1861 struct ata_taskfile tf; 1862 unsigned int i; 1863 1864 if (!buflen) 1865 return 0; 1866 1867 memset(&pbuf, 0, sizeof(pbuf)); 1868 memset(&tf, 0, sizeof(tf)); 1869 1870 pbuf[1] = 0x89; /* our page code */ 1871 pbuf[2] = (0x238 >> 8); /* page size fixed at 238h */ 1872 pbuf[3] = (0x238 & 0xff); 1873 1874 memcpy(&pbuf[8], "linux ", 8); 1875 memcpy(&pbuf[16], "libata ", 16); 1876 memcpy(&pbuf[32], DRV_VERSION, 4); 1877 ata_id_string(args->id, &pbuf[32], ATA_ID_FW_REV, 4); 1878 1879 /* we don't store the ATA device signature, so we fake it */ 1880 1881 tf.command = ATA_DRDY; /* really, this is Status reg */ 1882 tf.lbal = 0x1; 1883 tf.nsect = 0x1; 1884 1885 ata_tf_to_fis(&tf, 0, 1, &pbuf[36]); /* TODO: PMP? */ 1886 pbuf[36] = 0x34; /* force D2H Reg FIS (34h) */ 1887 1888 pbuf[56] = ATA_CMD_ID_ATA; 1889 1890 i = min(buflen, 60U); 1891 memcpy(rbuf, &pbuf[0], i); 1892 buflen -= i; 1893 1894 if (!buflen) 1895 return 0; 1896 1897 memcpy(&rbuf[60], &args->id[0], min(buflen, 512U)); 1898 return 0; 1899 } 1900 1901 /** 1902 * ata_scsiop_noop - Command handler that simply returns success. 1903 * @args: device IDENTIFY data / SCSI command of interest. 1904 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 1905 * @buflen: Response buffer length. 1906 * 1907 * No operation. Simply returns success to caller, to indicate 1908 * that the caller should successfully complete this SCSI command. 1909 * 1910 * LOCKING: 1911 * spin_lock_irqsave(host lock) 1912 */ 1913 1914 unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf, 1915 unsigned int buflen) 1916 { 1917 VPRINTK("ENTER\n"); 1918 return 0; 1919 } 1920 1921 /** 1922 * ata_msense_push - Push data onto MODE SENSE data output buffer 1923 * @ptr_io: (input/output) Location to store more output data 1924 * @last: End of output data buffer 1925 * @buf: Pointer to BLOB being added to output buffer 1926 * @buflen: Length of BLOB 1927 * 1928 * Store MODE SENSE data on an output buffer. 1929 * 1930 * LOCKING: 1931 * None. 1932 */ 1933 1934 static void ata_msense_push(u8 **ptr_io, const u8 *last, 1935 const u8 *buf, unsigned int buflen) 1936 { 1937 u8 *ptr = *ptr_io; 1938 1939 if ((ptr + buflen - 1) > last) 1940 return; 1941 1942 memcpy(ptr, buf, buflen); 1943 1944 ptr += buflen; 1945 1946 *ptr_io = ptr; 1947 } 1948 1949 /** 1950 * ata_msense_caching - Simulate MODE SENSE caching info page 1951 * @id: device IDENTIFY data 1952 * @ptr_io: (input/output) Location to store more output data 1953 * @last: End of output data buffer 1954 * 1955 * Generate a caching info page, which conditionally indicates 1956 * write caching to the SCSI layer, depending on device 1957 * capabilities. 1958 * 1959 * LOCKING: 1960 * None. 
1961 */ 1962 1963 static unsigned int ata_msense_caching(u16 *id, u8 **ptr_io, 1964 const u8 *last) 1965 { 1966 u8 page[CACHE_MPAGE_LEN]; 1967 1968 memcpy(page, def_cache_mpage, sizeof(page)); 1969 if (ata_id_wcache_enabled(id)) 1970 page[2] |= (1 << 2); /* write cache enable */ 1971 if (!ata_id_rahead_enabled(id)) 1972 page[12] |= (1 << 5); /* disable read ahead */ 1973 1974 ata_msense_push(ptr_io, last, page, sizeof(page)); 1975 return sizeof(page); 1976 } 1977 1978 /** 1979 * ata_msense_ctl_mode - Simulate MODE SENSE control mode page 1981 * @ptr_io: (input/output) Location to store more output data 1982 * @last: End of output data buffer 1983 * 1984 * Generate a generic MODE SENSE control mode page. 1985 * 1986 * LOCKING: 1987 * None. 1988 */ 1989 1990 static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last) 1991 { 1992 ata_msense_push(ptr_io, last, def_control_mpage, 1993 sizeof(def_control_mpage)); 1994 return sizeof(def_control_mpage); 1995 } 1996 1997 /** 1998 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page 2000 * @ptr_io: (input/output) Location to store more output data 2001 * @last: End of output data buffer 2002 * 2003 * Generate a generic MODE SENSE r/w error recovery page. 2004 * 2005 * LOCKING: 2006 * None. 2007 */ 2008 2009 static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last) 2010 { 2011 2012 ata_msense_push(ptr_io, last, def_rw_recovery_mpage, 2013 sizeof(def_rw_recovery_mpage)); 2014 return sizeof(def_rw_recovery_mpage); 2015 } 2016 2017 /* 2018 * We can turn this into a real blacklist if it's needed; for now just 2019 * blacklist any Maxtor BANC1G10 revision firmware 2020 */ 2021 static int ata_dev_supports_fua(u16 *id) 2022 { 2023 unsigned char model[ATA_ID_PROD_LEN + 1], fw[ATA_ID_FW_REV_LEN + 1]; 2024 2025 if (!libata_fua) 2026 return 0; 2027 if (!ata_id_has_fua(id)) 2028 return 0; 2029 2030 ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model)); 2031 ata_id_c_string(id, fw, ATA_ID_FW_REV, sizeof(fw)); 2032 2033 if (strcmp(model, "Maxtor")) 2034 return 1; 2035 if (strcmp(fw, "BANC1G10")) 2036 return 1; 2037 2038 return 0; /* blacklisted */ 2039 } 2040 2041 /** 2042 * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands 2043 * @args: device IDENTIFY data / SCSI command of interest. 2044 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 2045 * @buflen: Response buffer length. 2046 * 2047 * Simulate MODE SENSE commands. Assume this is invoked for direct 2048 * access devices (e.g. disks) only. There should be no block 2049 * descriptor for other device types.
2050 * 2051 * LOCKING: 2052 * spin_lock_irqsave(host lock) 2053 */ 2054 2055 unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, 2056 unsigned int buflen) 2057 { 2058 struct ata_device *dev = args->dev; 2059 u8 *scsicmd = args->cmd->cmnd, *p, *last; 2060 const u8 sat_blk_desc[] = { 2061 0, 0, 0, 0, /* number of blocks: sat unspecified */ 2062 0, 2063 0, 0x2, 0x0 /* block length: 512 bytes */ 2064 }; 2065 u8 pg, spg; 2066 unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen; 2067 u8 dpofua; 2068 2069 VPRINTK("ENTER\n"); 2070 2071 six_byte = (scsicmd[0] == MODE_SENSE); 2072 ebd = !(scsicmd[1] & 0x8); /* dbd bit inverted == edb */ 2073 /* 2074 * LLBA bit in msense(10) ignored (compliant) 2075 */ 2076 2077 page_control = scsicmd[2] >> 6; 2078 switch (page_control) { 2079 case 0: /* current */ 2080 break; /* supported */ 2081 case 3: /* saved */ 2082 goto saving_not_supp; 2083 case 1: /* changeable */ 2084 case 2: /* defaults */ 2085 default: 2086 goto invalid_fld; 2087 } 2088 2089 if (six_byte) { 2090 output_len = 4 + (ebd ? 8 : 0); 2091 alloc_len = scsicmd[4]; 2092 } else { 2093 output_len = 8 + (ebd ? 8 : 0); 2094 alloc_len = (scsicmd[7] << 8) + scsicmd[8]; 2095 } 2096 minlen = (alloc_len < buflen) ? alloc_len : buflen; 2097 2098 p = rbuf + output_len; 2099 last = rbuf + minlen - 1; 2100 2101 pg = scsicmd[2] & 0x3f; 2102 spg = scsicmd[3]; 2103 /* 2104 * No mode subpages supported (yet) but asking for _all_ 2105 * subpages may be valid 2106 */ 2107 if (spg && (spg != ALL_SUB_MPAGES)) 2108 goto invalid_fld; 2109 2110 switch(pg) { 2111 case RW_RECOVERY_MPAGE: 2112 output_len += ata_msense_rw_recovery(&p, last); 2113 break; 2114 2115 case CACHE_MPAGE: 2116 output_len += ata_msense_caching(args->id, &p, last); 2117 break; 2118 2119 case CONTROL_MPAGE: { 2120 output_len += ata_msense_ctl_mode(&p, last); 2121 break; 2122 } 2123 2124 case ALL_MPAGES: 2125 output_len += ata_msense_rw_recovery(&p, last); 2126 output_len += ata_msense_caching(args->id, &p, last); 2127 output_len += ata_msense_ctl_mode(&p, last); 2128 break; 2129 2130 default: /* invalid page code */ 2131 goto invalid_fld; 2132 } 2133 2134 if (minlen < 1) 2135 return 0; 2136 2137 dpofua = 0; 2138 if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) && 2139 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count)) 2140 dpofua = 1 << 4; 2141 2142 if (six_byte) { 2143 output_len--; 2144 rbuf[0] = output_len; 2145 if (minlen > 2) 2146 rbuf[2] |= dpofua; 2147 if (ebd) { 2148 if (minlen > 3) 2149 rbuf[3] = sizeof(sat_blk_desc); 2150 if (minlen > 11) 2151 memcpy(rbuf + 4, sat_blk_desc, 2152 sizeof(sat_blk_desc)); 2153 } 2154 } else { 2155 output_len -= 2; 2156 rbuf[0] = output_len >> 8; 2157 if (minlen > 1) 2158 rbuf[1] = output_len; 2159 if (minlen > 3) 2160 rbuf[3] |= dpofua; 2161 if (ebd) { 2162 if (minlen > 7) 2163 rbuf[7] = sizeof(sat_blk_desc); 2164 if (minlen > 15) 2165 memcpy(rbuf + 8, sat_blk_desc, 2166 sizeof(sat_blk_desc)); 2167 } 2168 } 2169 return 0; 2170 2171 invalid_fld: 2172 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0); 2173 /* "Invalid field in cbd" */ 2174 return 1; 2175 2176 saving_not_supp: 2177 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0); 2178 /* "Saving parameters not supported" */ 2179 return 1; 2180 } 2181 2182 /** 2183 * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands 2184 * @args: device IDENTIFY data / SCSI command of interest. 2185 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 
2186 * @buflen: Response buffer length. 2187 * 2188 * Simulate READ CAPACITY commands. 2189 * 2190 * LOCKING: 2191 * None. 2192 */ 2193 unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, 2194 unsigned int buflen) 2195 { 2196 u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */ 2197 2198 VPRINTK("ENTER\n"); 2199 2200 if (args->cmd->cmnd[0] == READ_CAPACITY) { 2201 if (last_lba >= 0xffffffffULL) 2202 last_lba = 0xffffffff; 2203 2204 /* sector count, 32-bit */ 2205 ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 3)); 2206 ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 2)); 2207 ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 1)); 2208 ATA_SCSI_RBUF_SET(3, last_lba); 2209 2210 /* sector size */ 2211 ATA_SCSI_RBUF_SET(6, ATA_SECT_SIZE >> 8); 2212 ATA_SCSI_RBUF_SET(7, ATA_SECT_SIZE); 2213 } else { 2214 /* sector count, 64-bit */ 2215 ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 7)); 2216 ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 6)); 2217 ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 5)); 2218 ATA_SCSI_RBUF_SET(3, last_lba >> (8 * 4)); 2219 ATA_SCSI_RBUF_SET(4, last_lba >> (8 * 3)); 2220 ATA_SCSI_RBUF_SET(5, last_lba >> (8 * 2)); 2221 ATA_SCSI_RBUF_SET(6, last_lba >> (8 * 1)); 2222 ATA_SCSI_RBUF_SET(7, last_lba); 2223 2224 /* sector size */ 2225 ATA_SCSI_RBUF_SET(10, ATA_SECT_SIZE >> 8); 2226 ATA_SCSI_RBUF_SET(11, ATA_SECT_SIZE); 2227 } 2228 2229 return 0; 2230 } 2231 2232 /** 2233 * ata_scsiop_report_luns - Simulate REPORT LUNS command 2234 * @args: device IDENTIFY data / SCSI command of interest. 2235 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 2236 * @buflen: Response buffer length. 2237 * 2238 * Simulate REPORT LUNS command. 2239 * 2240 * LOCKING: 2241 * spin_lock_irqsave(host lock) 2242 */ 2243 2244 unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, 2245 unsigned int buflen) 2246 { 2247 VPRINTK("ENTER\n"); 2248 rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */ 2249 2250 return 0; 2251 } 2252 2253 /** 2254 * ata_scsi_set_sense - Set SCSI sense data and status 2255 * @cmd: SCSI request to be handled 2256 * @sk: SCSI-defined sense key 2257 * @asc: SCSI-defined additional sense code 2258 * @ascq: SCSI-defined additional sense code qualifier 2259 * 2260 * Helper function that builds a valid fixed format, current 2261 * response code and the given sense key (sk), additional sense 2262 * code (asc) and additional sense code qualifier (ascq) with 2263 * a SCSI command status of %SAM_STAT_CHECK_CONDITION and 2264 * DRIVER_SENSE set in the upper bits of scsi_cmnd::result . 2265 * 2266 * LOCKING: 2267 * Not required 2268 */ 2269 2270 void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) 2271 { 2272 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 2273 2274 cmd->sense_buffer[0] = 0x70; /* fixed format, current */ 2275 cmd->sense_buffer[2] = sk; 2276 cmd->sense_buffer[7] = 18 - 8; /* additional sense length */ 2277 cmd->sense_buffer[12] = asc; 2278 cmd->sense_buffer[13] = ascq; 2279 } 2280 2281 /** 2282 * ata_scsi_badcmd - End a SCSI request with an error 2283 * @cmd: SCSI request to be handled 2284 * @done: SCSI command completion function 2285 * @asc: SCSI-defined additional sense code 2286 * @ascq: SCSI-defined additional sense code qualifier 2287 * 2288 * Helper function that completes a SCSI command with 2289 * %SAM_STAT_CHECK_CONDITION, with a sense key %ILLEGAL_REQUEST 2290 * and the specified additional sense codes. 
2291 * 2292 * LOCKING: 2293 * spin_lock_irqsave(host lock) 2294 */ 2295 2296 void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq) 2297 { 2298 DPRINTK("ENTER\n"); 2299 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq); 2300 2301 done(cmd); 2302 } 2303 2304 static void atapi_sense_complete(struct ata_queued_cmd *qc) 2305 { 2306 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) { 2307 /* FIXME: not quite right; we don't want the 2308 * translation of taskfile registers into 2309 * a sense descriptors, since that's only 2310 * correct for ATA, not ATAPI 2311 */ 2312 ata_gen_passthru_sense(qc); 2313 } 2314 2315 qc->scsidone(qc->scsicmd); 2316 ata_qc_free(qc); 2317 } 2318 2319 /* is it pointless to prefer PIO for "safety reasons"? */ 2320 static inline int ata_pio_use_silly(struct ata_port *ap) 2321 { 2322 return (ap->flags & ATA_FLAG_PIO_DMA); 2323 } 2324 2325 static void atapi_request_sense(struct ata_queued_cmd *qc) 2326 { 2327 struct ata_port *ap = qc->ap; 2328 struct scsi_cmnd *cmd = qc->scsicmd; 2329 2330 DPRINTK("ATAPI request sense\n"); 2331 2332 /* FIXME: is this needed? */ 2333 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer)); 2334 2335 ap->ops->tf_read(ap, &qc->tf); 2336 2337 /* fill these in, for the case where they are -not- overwritten */ 2338 cmd->sense_buffer[0] = 0x70; 2339 cmd->sense_buffer[2] = qc->tf.feature >> 4; 2340 2341 ata_qc_reinit(qc); 2342 2343 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer)); 2344 qc->dma_dir = DMA_FROM_DEVICE; 2345 2346 memset(&qc->cdb, 0, qc->dev->cdb_len); 2347 qc->cdb[0] = REQUEST_SENSE; 2348 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE; 2349 2350 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2351 qc->tf.command = ATA_CMD_PACKET; 2352 2353 if (ata_pio_use_silly(ap)) { 2354 qc->tf.protocol = ATA_PROT_ATAPI_DMA; 2355 qc->tf.feature |= ATAPI_PKT_DMA; 2356 } else { 2357 qc->tf.protocol = ATA_PROT_ATAPI; 2358 qc->tf.lbam = SCSI_SENSE_BUFFERSIZE; 2359 qc->tf.lbah = 0; 2360 } 2361 qc->nbytes = SCSI_SENSE_BUFFERSIZE; 2362 2363 qc->complete_fn = atapi_sense_complete; 2364 2365 ata_qc_issue(qc); 2366 2367 DPRINTK("EXIT\n"); 2368 } 2369 2370 static void atapi_qc_complete(struct ata_queued_cmd *qc) 2371 { 2372 struct scsi_cmnd *cmd = qc->scsicmd; 2373 unsigned int err_mask = qc->err_mask; 2374 2375 VPRINTK("ENTER, err_mask 0x%X\n", err_mask); 2376 2377 /* handle completion from new EH */ 2378 if (unlikely(qc->ap->ops->error_handler && 2379 (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) { 2380 2381 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) { 2382 /* FIXME: not quite right; we don't want the 2383 * translation of taskfile registers into a 2384 * sense descriptors, since that's only 2385 * correct for ATA, not ATAPI 2386 */ 2387 ata_gen_passthru_sense(qc); 2388 } 2389 2390 /* SCSI EH automatically locks door if sdev->locked is 2391 * set. Sometimes door lock request continues to 2392 * fail, for example, when no media is present. This 2393 * creates a loop - SCSI EH issues door lock which 2394 * fails and gets invoked again to acquire sense data 2395 * for the failed command. 2396 * 2397 * If door lock fails, always clear sdev->locked to 2398 * avoid this infinite loop. 
2399 */ 2400 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL) 2401 qc->dev->sdev->locked = 0; 2402 2403 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; 2404 qc->scsidone(cmd); 2405 ata_qc_free(qc); 2406 return; 2407 } 2408 2409 /* successful completion or old EH failure path */ 2410 if (unlikely(err_mask & AC_ERR_DEV)) { 2411 cmd->result = SAM_STAT_CHECK_CONDITION; 2412 atapi_request_sense(qc); 2413 return; 2414 } else if (unlikely(err_mask)) { 2415 /* FIXME: not quite right; we don't want the 2416 * translation of taskfile registers into 2417 * a sense descriptors, since that's only 2418 * correct for ATA, not ATAPI 2419 */ 2420 ata_gen_passthru_sense(qc); 2421 } else { 2422 u8 *scsicmd = cmd->cmnd; 2423 2424 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { 2425 u8 *buf = NULL; 2426 unsigned int buflen; 2427 2428 buflen = ata_scsi_rbuf_get(cmd, &buf); 2429 2430 /* ATAPI devices typically report zero for their SCSI version, 2431 * and sometimes deviate from the spec WRT response data 2432 * format. If SCSI version is reported as zero like normal, 2433 * then we make the following fixups: 1) Fake MMC-5 version, 2434 * to indicate to the Linux scsi midlayer this is a modern 2435 * device. 2) Ensure response data format / ATAPI information 2436 * are always correct. 2437 */ 2438 if (buf[2] == 0) { 2439 buf[2] = 0x5; 2440 buf[3] = 0x32; 2441 } 2442 2443 ata_scsi_rbuf_put(cmd, buf); 2444 } 2445 2446 cmd->result = SAM_STAT_GOOD; 2447 } 2448 2449 qc->scsidone(cmd); 2450 ata_qc_free(qc); 2451 } 2452 /** 2453 * atapi_xlat - Initialize PACKET taskfile 2454 * @qc: command structure to be initialized 2455 * 2456 * LOCKING: 2457 * spin_lock_irqsave(host lock) 2458 * 2459 * RETURNS: 2460 * Zero on success, non-zero on failure. 2461 */ 2462 static unsigned int atapi_xlat(struct ata_queued_cmd *qc) 2463 { 2464 struct scsi_cmnd *scmd = qc->scsicmd; 2465 struct ata_device *dev = qc->dev; 2466 int using_pio = (dev->flags & ATA_DFLAG_PIO); 2467 int nodata = (scmd->sc_data_direction == DMA_NONE); 2468 unsigned int nbytes; 2469 2470 memset(qc->cdb, 0, dev->cdb_len); 2471 memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len); 2472 2473 qc->complete_fn = atapi_qc_complete; 2474 2475 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2476 if (scmd->sc_data_direction == DMA_TO_DEVICE) { 2477 qc->tf.flags |= ATA_TFLAG_WRITE; 2478 DPRINTK("direction: write\n"); 2479 } 2480 2481 qc->tf.command = ATA_CMD_PACKET; 2482 qc->nbytes = scsi_bufflen(scmd); 2483 2484 /* check whether ATAPI DMA is safe */ 2485 if (!using_pio && ata_check_atapi_dma(qc)) 2486 using_pio = 1; 2487 2488 /* Some controller variants snoop this value for Packet transfers 2489 to do state machine and FIFO management. 
Thus we want to set it 2490 properly, and for DMA where it is effectively meaningless */ 2491 nbytes = min(qc->nbytes, (unsigned int)63 * 1024); 2492 2493 qc->tf.lbam = (nbytes & 0xFF); 2494 qc->tf.lbah = (nbytes >> 8); 2495 2496 if (using_pio || nodata) { 2497 /* no data, or PIO data xfer */ 2498 if (nodata) 2499 qc->tf.protocol = ATA_PROT_ATAPI_NODATA; 2500 else 2501 qc->tf.protocol = ATA_PROT_ATAPI; 2502 } else { 2503 /* DMA data xfer */ 2504 qc->tf.protocol = ATA_PROT_ATAPI_DMA; 2505 qc->tf.feature |= ATAPI_PKT_DMA; 2506 2507 if (atapi_dmadir && (scmd->sc_data_direction != DMA_TO_DEVICE)) 2508 /* some SATA bridges need us to indicate data xfer direction */ 2509 qc->tf.feature |= ATAPI_DMADIR; 2510 } 2511 2512 2513 /* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE 2514 as ATAPI tape drives don't get this right otherwise */ 2515 return 0; 2516 } 2517 2518 static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) 2519 { 2520 if (ap->nr_pmp_links == 0) { 2521 if (likely(devno < ata_link_max_devices(&ap->link))) 2522 return &ap->link.device[devno]; 2523 } else { 2524 if (likely(devno < ap->nr_pmp_links)) 2525 return &ap->pmp_link[devno].device[0]; 2526 } 2527 2528 return NULL; 2529 } 2530 2531 static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, 2532 const struct scsi_device *scsidev) 2533 { 2534 int devno; 2535 2536 /* skip commands not addressed to targets we simulate */ 2537 if (ap->nr_pmp_links == 0) { 2538 if (unlikely(scsidev->channel || scsidev->lun)) 2539 return NULL; 2540 devno = scsidev->id; 2541 } else { 2542 if (unlikely(scsidev->id || scsidev->lun)) 2543 return NULL; 2544 devno = scsidev->channel; 2545 } 2546 2547 return ata_find_dev(ap, devno); 2548 } 2549 2550 /** 2551 * ata_scsi_dev_enabled - determine if device is enabled 2552 * @dev: ATA device 2553 * 2554 * Determine if commands should be sent to the specified device. 2555 * 2556 * LOCKING: 2557 * spin_lock_irqsave(host lock) 2558 * 2559 * RETURNS: 2560 * 0 if commands are not allowed / 1 if commands are allowed 2561 */ 2562 2563 static int ata_scsi_dev_enabled(struct ata_device *dev) 2564 { 2565 if (unlikely(!ata_dev_enabled(dev))) 2566 return 0; 2567 2568 if (!atapi_enabled || (dev->link->ap->flags & ATA_FLAG_NO_ATAPI)) { 2569 if (unlikely(dev->class == ATA_DEV_ATAPI)) { 2570 ata_dev_printk(dev, KERN_WARNING, 2571 "WARNING: ATAPI is %s, device ignored.\n", 2572 atapi_enabled ? "not supported with this driver" : "disabled"); 2573 return 0; 2574 } 2575 } 2576 2577 return 1; 2578 } 2579 2580 /** 2581 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd 2582 * @ap: ATA port to which the device is attached 2583 * @scsidev: SCSI device from which we derive the ATA device 2584 * 2585 * Given various information provided in struct scsi_cmnd, 2586 * map that onto an ATA bus, and using that mapping 2587 * determine which ata_device is associated with the 2588 * SCSI command to be sent. 2589 * 2590 * LOCKING: 2591 * spin_lock_irqsave(host lock) 2592 * 2593 * RETURNS: 2594 * Associated ATA device, or %NULL if not found. 2595 */ 2596 static struct ata_device * 2597 ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev) 2598 { 2599 struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev); 2600 2601 if (unlikely(!dev || !ata_scsi_dev_enabled(dev))) 2602 return NULL; 2603 2604 return dev; 2605 } 2606 2607 /* 2608 * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value. 2609 * @byte1: Byte 1 from pass-thru CDB. 
2610 * 2611 * RETURNS: 2612 * ATA_PROT_UNKNOWN if mapping failed/unimplemented, protocol otherwise. 2613 */ 2614 static u8 2615 ata_scsi_map_proto(u8 byte1) 2616 { 2617 switch((byte1 & 0x1e) >> 1) { 2618 case 3: /* Non-data */ 2619 return ATA_PROT_NODATA; 2620 2621 case 6: /* DMA */ 2622 case 10: /* UDMA Data-in */ 2623 case 11: /* UDMA Data-Out */ 2624 return ATA_PROT_DMA; 2625 2626 case 4: /* PIO Data-in */ 2627 case 5: /* PIO Data-out */ 2628 return ATA_PROT_PIO; 2629 2630 case 0: /* Hard Reset */ 2631 case 1: /* SRST */ 2632 case 8: /* Device Diagnostic */ 2633 case 9: /* Device Reset */ 2634 case 7: /* DMA Queued */ 2635 case 12: /* FPDMA */ 2636 case 15: /* Return Response Info */ 2637 default: /* Reserved */ 2638 break; 2639 } 2640 2641 return ATA_PROT_UNKNOWN; 2642 } 2643 2644 /** 2645 * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile 2646 * @qc: command structure to be initialized 2647 * 2648 * Handles either 12 or 16-byte versions of the CDB. 2649 * 2650 * RETURNS: 2651 * Zero on success, non-zero on failure. 2652 */ 2653 static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) 2654 { 2655 struct ata_taskfile *tf = &(qc->tf); 2656 struct scsi_cmnd *scmd = qc->scsicmd; 2657 struct ata_device *dev = qc->dev; 2658 const u8 *cdb = scmd->cmnd; 2659 2660 if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN) 2661 goto invalid_fld; 2662 2663 /* We may not issue DMA commands if no DMA mode is set */ 2664 if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) 2665 goto invalid_fld; 2666 2667 /* 2668 * 12 and 16 byte CDBs use different offsets to 2669 * provide the various register values. 2670 */ 2671 if (cdb[0] == ATA_16) { 2672 /* 2673 * 16-byte CDB - may contain extended commands. 2674 * 2675 * If that is the case, copy the upper byte register values. 2676 */ 2677 if (cdb[1] & 0x01) { 2678 tf->hob_feature = cdb[3]; 2679 tf->hob_nsect = cdb[5]; 2680 tf->hob_lbal = cdb[7]; 2681 tf->hob_lbam = cdb[9]; 2682 tf->hob_lbah = cdb[11]; 2683 tf->flags |= ATA_TFLAG_LBA48; 2684 } else 2685 tf->flags &= ~ATA_TFLAG_LBA48; 2686 2687 /* 2688 * Always copy low byte, device and command registers. 2689 */ 2690 tf->feature = cdb[4]; 2691 tf->nsect = cdb[6]; 2692 tf->lbal = cdb[8]; 2693 tf->lbam = cdb[10]; 2694 tf->lbah = cdb[12]; 2695 tf->device = cdb[13]; 2696 tf->command = cdb[14]; 2697 } else { 2698 /* 2699 * 12-byte CDB - incapable of extended commands. 2700 */ 2701 tf->flags &= ~ATA_TFLAG_LBA48; 2702 2703 tf->feature = cdb[3]; 2704 tf->nsect = cdb[4]; 2705 tf->lbal = cdb[5]; 2706 tf->lbam = cdb[6]; 2707 tf->lbah = cdb[7]; 2708 tf->device = cdb[8]; 2709 tf->command = cdb[9]; 2710 } 2711 2712 /* enforce correct master/slave bit */ 2713 tf->device = dev->devno ? 
2714 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; 2715 2716 /* sanity check for pio multi commands */ 2717 if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) 2718 goto invalid_fld; 2719 2720 if (is_multi_taskfile(tf)) { 2721 unsigned int multi_count = 1 << (cdb[1] >> 5); 2722 2723 /* compare the passed through multi_count 2724 * with the cached multi_count of libata 2725 */ 2726 if (multi_count != dev->multi_count) 2727 ata_dev_printk(dev, KERN_WARNING, 2728 "invalid multi_count %u ignored\n", 2729 multi_count); 2730 } 2731 2732 /* READ/WRITE LONG use a non-standard sect_size */ 2733 qc->sect_size = ATA_SECT_SIZE; 2734 switch (tf->command) { 2735 case ATA_CMD_READ_LONG: 2736 case ATA_CMD_READ_LONG_ONCE: 2737 case ATA_CMD_WRITE_LONG: 2738 case ATA_CMD_WRITE_LONG_ONCE: 2739 if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) 2740 goto invalid_fld; 2741 qc->sect_size = scsi_bufflen(scmd); 2742 } 2743 2744 /* 2745 * Filter SET_FEATURES - XFER MODE command -- otherwise, 2746 * SET_FEATURES - XFER MODE must be preceded/succeeded 2747 * by an update to hardware-specific registers for each 2748 * controller (i.e. the reason for ->set_piomode(), 2749 * ->set_dmamode(), and ->post_set_mode() hooks). 2750 */ 2751 if ((tf->command == ATA_CMD_SET_FEATURES) 2752 && (tf->feature == SETFEATURES_XFER)) 2753 goto invalid_fld; 2754 2755 /* 2756 * Set flags so that all registers will be written, 2757 * and pass on write indication (used for PIO/DMA 2758 * setup.) 2759 */ 2760 tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE); 2761 2762 if (scmd->sc_data_direction == DMA_TO_DEVICE) 2763 tf->flags |= ATA_TFLAG_WRITE; 2764 2765 /* 2766 * Set transfer length. 2767 * 2768 * TODO: find out if we need to do more here to 2769 * cover scatter/gather case. 2770 */ 2771 qc->nbytes = scsi_bufflen(scmd); 2772 2773 /* request result TF and be quiet about device error */ 2774 qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; 2775 2776 return 0; 2777 2778 invalid_fld: 2779 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00); 2780 /* "Invalid field in cdb" */ 2781 return 1; 2782 } 2783 2784 /** 2785 * ata_get_xlat_func - check if SCSI to ATA translation is possible 2786 * @dev: ATA device 2787 * @cmd: SCSI command opcode to consider 2788 * 2789 * Look up the SCSI command given, and determine whether the 2790 * SCSI command is to be translated or simulated. 2791 * 2792 * RETURNS: 2793 * Pointer to translation function if possible, %NULL if not. 2794 */ 2795 2796 static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd) 2797 { 2798 switch (cmd) { 2799 case READ_6: 2800 case READ_10: 2801 case READ_16: 2802 2803 case WRITE_6: 2804 case WRITE_10: 2805 case WRITE_16: 2806 return ata_scsi_rw_xlat; 2807 2808 case SYNCHRONIZE_CACHE: 2809 if (ata_try_flush_cache(dev)) 2810 return ata_scsi_flush_xlat; 2811 break; 2812 2813 case VERIFY: 2814 case VERIFY_16: 2815 return ata_scsi_verify_xlat; 2816 2817 case ATA_12: 2818 case ATA_16: 2819 return ata_scsi_pass_thru; 2820 2821 case START_STOP: 2822 return ata_scsi_start_stop_xlat; 2823 } 2824 2825 return NULL; 2826 } 2827 2828 /** 2829 * ata_scsi_dump_cdb - dump SCSI command contents to dmesg 2830 * @ap: ATA port to which the command was being sent 2831 * @cmd: SCSI command to dump 2832 * 2833 * Prints the contents of a SCSI command via printk(). 
2834 */ 2835 2836 static inline void ata_scsi_dump_cdb(struct ata_port *ap, 2837 struct scsi_cmnd *cmd) 2838 { 2839 #ifdef ATA_DEBUG 2840 struct scsi_device *scsidev = cmd->device; 2841 u8 *scsicmd = cmd->cmnd; 2842 2843 DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", 2844 ap->print_id, 2845 scsidev->channel, scsidev->id, scsidev->lun, 2846 scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3], 2847 scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7], 2848 scsicmd[8]); 2849 #endif 2850 } 2851 2852 static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, 2853 void (*done)(struct scsi_cmnd *), 2854 struct ata_device *dev) 2855 { 2856 u8 scsi_op = scmd->cmnd[0]; 2857 ata_xlat_func_t xlat_func; 2858 int rc = 0; 2859 2860 if (dev->class == ATA_DEV_ATA) { 2861 if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len)) 2862 goto bad_cdb_len; 2863 2864 xlat_func = ata_get_xlat_func(dev, scsi_op); 2865 } else { 2866 if (unlikely(!scmd->cmd_len)) 2867 goto bad_cdb_len; 2868 2869 xlat_func = NULL; 2870 if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { 2871 /* relay SCSI command to ATAPI device */ 2872 if (unlikely(scmd->cmd_len > dev->cdb_len)) 2873 goto bad_cdb_len; 2874 2875 xlat_func = atapi_xlat; 2876 } else { 2877 /* ATA_16 passthru, treat as an ATA command */ 2878 if (unlikely(scmd->cmd_len > 16)) 2879 goto bad_cdb_len; 2880 2881 xlat_func = ata_get_xlat_func(dev, scsi_op); 2882 } 2883 } 2884 2885 if (xlat_func) 2886 rc = ata_scsi_translate(dev, scmd, done, xlat_func); 2887 else 2888 ata_scsi_simulate(dev, scmd, done); 2889 2890 return rc; 2891 2892 bad_cdb_len: 2893 DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n", 2894 scmd->cmd_len, scsi_op, dev->cdb_len); 2895 scmd->result = DID_ERROR << 16; 2896 done(scmd); 2897 return 0; 2898 } 2899 2900 /** 2901 * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device 2902 * @cmd: SCSI command to be sent 2903 * @done: Completion function, called when command is complete 2904 * 2905 * In some cases, this function translates SCSI commands into 2906 * ATA taskfiles, and queues the taskfiles to be sent to 2907 * hardware. In other cases, this function simulates a 2908 * SCSI device by evaluating and responding to certain 2909 * SCSI commands. This creates the overall effect of 2910 * ATA and ATAPI devices appearing as SCSI devices. 2911 * 2912 * LOCKING: 2913 * Releases scsi-layer-held lock, and obtains host lock. 2914 * 2915 * RETURNS: 2916 * Return value from __ata_scsi_queuecmd() if @cmd can be queued, 2917 * 0 otherwise. 2918 */ 2919 int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 2920 { 2921 struct ata_port *ap; 2922 struct ata_device *dev; 2923 struct scsi_device *scsidev = cmd->device; 2924 struct Scsi_Host *shost = scsidev->host; 2925 int rc = 0; 2926 2927 ap = ata_shost_to_port(shost); 2928 2929 spin_unlock(shost->host_lock); 2930 spin_lock(ap->lock); 2931 2932 ata_scsi_dump_cdb(ap, cmd); 2933 2934 dev = ata_scsi_find_dev(ap, scsidev); 2935 if (likely(dev)) 2936 rc = __ata_scsi_queuecmd(cmd, done, dev); 2937 else { 2938 cmd->result = (DID_BAD_TARGET << 16); 2939 done(cmd); 2940 } 2941 2942 spin_unlock(ap->lock); 2943 spin_lock(shost->host_lock); 2944 return rc; 2945 } 2946 2947 /** 2948 * ata_scsi_simulate - simulate SCSI command on ATA device 2949 * @dev: the target device 2950 * @cmd: SCSI command being sent to device. 2951 * @done: SCSI command completion function. 2952 * 2953 * Interprets and directly executes a select list of SCSI commands 2954 * that can be handled internally. 
2955 * 2956 * LOCKING: 2957 * spin_lock_irqsave(host lock) 2958 */ 2959 2960 void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, 2961 void (*done)(struct scsi_cmnd *)) 2962 { 2963 struct ata_scsi_args args; 2964 const u8 *scsicmd = cmd->cmnd; 2965 u8 tmp8; 2966 2967 args.dev = dev; 2968 args.id = dev->id; 2969 args.cmd = cmd; 2970 args.done = done; 2971 2972 switch(scsicmd[0]) { 2973 /* TODO: worth improving? */ 2974 case FORMAT_UNIT: 2975 ata_scsi_invalid_field(cmd, done); 2976 break; 2977 2978 case INQUIRY: 2979 if (scsicmd[1] & 2) /* is CmdDt set? */ 2980 ata_scsi_invalid_field(cmd, done); 2981 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */ 2982 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); 2983 else switch (scsicmd[2]) { 2984 case 0x00: 2985 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00); 2986 break; 2987 case 0x80: 2988 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80); 2989 break; 2990 case 0x83: 2991 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); 2992 break; 2993 case 0x89: 2994 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89); 2995 break; 2996 default: 2997 ata_scsi_invalid_field(cmd, done); 2998 break; 2999 } 3000 break; 3001 3002 case MODE_SENSE: 3003 case MODE_SENSE_10: 3004 ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense); 3005 break; 3006 3007 case MODE_SELECT: /* unconditionally return */ 3008 case MODE_SELECT_10: /* bad-field-in-cdb */ 3009 ata_scsi_invalid_field(cmd, done); 3010 break; 3011 3012 case READ_CAPACITY: 3013 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 3014 break; 3015 3016 case SERVICE_ACTION_IN: 3017 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) 3018 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 3019 else 3020 ata_scsi_invalid_field(cmd, done); 3021 break; 3022 3023 case REPORT_LUNS: 3024 ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns); 3025 break; 3026 3027 case REQUEST_SENSE: 3028 ata_scsi_set_sense(cmd, 0, 0, 0); 3029 cmd->result = (DRIVER_SENSE << 24); 3030 done(cmd); 3031 break; 3032 3033 /* if we reach this, then writeback caching is disabled, 3034 * turning this into a no-op. 3035 */ 3036 case SYNCHRONIZE_CACHE: 3037 /* fall through */ 3038 3039 /* no-op's, complete with success */ 3040 case REZERO_UNIT: 3041 case SEEK_6: 3042 case SEEK_10: 3043 case TEST_UNIT_READY: 3044 ata_scsi_rbuf_fill(&args, ata_scsiop_noop); 3045 break; 3046 3047 case SEND_DIAGNOSTIC: 3048 tmp8 = scsicmd[1] & ~(1 << 3); 3049 if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4])) 3050 ata_scsi_rbuf_fill(&args, ata_scsiop_noop); 3051 else 3052 ata_scsi_invalid_field(cmd, done); 3053 break; 3054 3055 /* all other commands */ 3056 default: 3057 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0); 3058 /* "Invalid command operation code" */ 3059 done(cmd); 3060 break; 3061 } 3062 } 3063 3064 int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) 3065 { 3066 int i, rc; 3067 3068 for (i = 0; i < host->n_ports; i++) { 3069 struct ata_port *ap = host->ports[i]; 3070 struct Scsi_Host *shost; 3071 3072 rc = -ENOMEM; 3073 shost = scsi_host_alloc(sht, sizeof(struct ata_port *)); 3074 if (!shost) 3075 goto err_alloc; 3076 3077 *(struct ata_port **)&shost->hostdata[0] = ap; 3078 ap->scsi_host = shost; 3079 3080 shost->transportt = &ata_scsi_transport_template; 3081 shost->unique_id = ap->print_id; 3082 shost->max_id = 16; 3083 shost->max_lun = 1; 3084 shost->max_channel = 1; 3085 shost->max_cmd_len = 16; 3086 3087 /* Schedule policy is determined by ->qc_defer() 3088 * callback and it needs to see every deferred qc. 
3089 * Set host_blocked to 1 to prevent SCSI midlayer from 3090 * automatically deferring requests. 3091 */ 3092 shost->max_host_blocked = 1; 3093 3094 rc = scsi_add_host(ap->scsi_host, ap->host->dev); 3095 if (rc) 3096 goto err_add; 3097 } 3098 3099 return 0; 3100 3101 err_add: 3102 scsi_host_put(host->ports[i]->scsi_host); 3103 err_alloc: 3104 while (--i >= 0) { 3105 struct Scsi_Host *shost = host->ports[i]->scsi_host; 3106 3107 scsi_remove_host(shost); 3108 scsi_host_put(shost); 3109 } 3110 return rc; 3111 } 3112 3113 void ata_scsi_scan_host(struct ata_port *ap, int sync) 3114 { 3115 int tries = 5; 3116 struct ata_device *last_failed_dev = NULL; 3117 struct ata_link *link; 3118 struct ata_device *dev; 3119 3120 if (ap->flags & ATA_FLAG_DISABLED) 3121 return; 3122 3123 repeat: 3124 ata_port_for_each_link(link, ap) { 3125 ata_link_for_each_dev(dev, link) { 3126 struct scsi_device *sdev; 3127 int channel = 0, id = 0; 3128 3129 if (!ata_dev_enabled(dev) || dev->sdev) 3130 continue; 3131 3132 if (ata_is_host_link(link)) 3133 id = dev->devno; 3134 else 3135 channel = link->pmp; 3136 3137 sdev = __scsi_add_device(ap->scsi_host, channel, id, 0, 3138 NULL); 3139 if (!IS_ERR(sdev)) { 3140 dev->sdev = sdev; 3141 scsi_device_put(sdev); 3142 } 3143 } 3144 } 3145 3146 /* If we scanned while EH was in progress or allocation 3147 * failure occurred, scan would have failed silently. Check 3148 * whether all devices are attached. 3149 */ 3150 ata_port_for_each_link(link, ap) { 3151 ata_link_for_each_dev(dev, link) { 3152 if (ata_dev_enabled(dev) && !dev->sdev) 3153 goto exit_loop; 3154 } 3155 } 3156 exit_loop: 3157 if (!link) 3158 return; 3159 3160 /* we're missing some SCSI devices */ 3161 if (sync) { 3162 /* If caller requested synchronous scan && we've made 3163 * any progress, sleep briefly and repeat. 3164 */ 3165 if (dev != last_failed_dev) { 3166 msleep(100); 3167 last_failed_dev = dev; 3168 goto repeat; 3169 } 3170 3171 /* We might be failing to detect the boot device, give it 3172 * a few more chances. 3173 */ 3174 if (--tries) { 3175 msleep(100); 3176 goto repeat; 3177 } 3178 3179 ata_port_printk(ap, KERN_ERR, "WARNING: synchronous SCSI scan " 3180 "failed without making any progress,\n" 3181 " switching to async\n"); 3182 } 3183 3184 queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 3185 round_jiffies_relative(HZ)); 3186 } 3187 3188 /** 3189 * ata_scsi_offline_dev - offline attached SCSI device 3190 * @dev: ATA device to offline attached SCSI device for 3191 * 3192 * This function is called from ata_eh_hotplug() and is responsible 3193 * for taking the SCSI device attached to @dev offline. This 3194 * function is called with the host lock held, which protects dev->sdev 3195 * against clearing. 3196 * 3197 * LOCKING: 3198 * spin_lock_irqsave(host lock) 3199 * 3200 * RETURNS: 3201 * 1 if attached SCSI device exists, 0 otherwise. 3202 */ 3203 int ata_scsi_offline_dev(struct ata_device *dev) 3204 { 3205 if (dev->sdev) { 3206 scsi_device_set_state(dev->sdev, SDEV_OFFLINE); 3207 return 1; 3208 } 3209 return 0; 3210 } 3211 3212 /** 3213 * ata_scsi_remove_dev - remove attached SCSI device 3214 * @dev: ATA device to remove attached SCSI device for 3215 * 3216 * This function is called from ata_eh_scsi_hotplug() and is 3217 * responsible for removing the SCSI device attached to @dev. 3218 * 3219 * LOCKING: 3220 * Kernel thread context (may sleep).
3221 */ 3222 static void ata_scsi_remove_dev(struct ata_device *dev) 3223 { 3224 struct ata_port *ap = dev->link->ap; 3225 struct scsi_device *sdev; 3226 unsigned long flags; 3227 3228 /* Alas, we need to grab scan_mutex to ensure SCSI device 3229 * state doesn't change underneath us and thus 3230 * scsi_device_get() always succeeds. The mutex locking can 3231 * be removed if there is __scsi_device_get() interface which 3232 * increments reference counts regardless of device state. 3233 */ 3234 mutex_lock(&ap->scsi_host->scan_mutex); 3235 spin_lock_irqsave(ap->lock, flags); 3236 3237 /* clearing dev->sdev is protected by host lock */ 3238 sdev = dev->sdev; 3239 dev->sdev = NULL; 3240 3241 if (sdev) { 3242 /* If user initiated unplug races with us, sdev can go 3243 * away underneath us after the host lock and 3244 * scan_mutex are released. Hold onto it. 3245 */ 3246 if (scsi_device_get(sdev) == 0) { 3247 /* The following ensures the attached sdev is 3248 * offline on return from ata_scsi_offline_dev() 3249 * regardless it wins or loses the race 3250 * against this function. 3251 */ 3252 scsi_device_set_state(sdev, SDEV_OFFLINE); 3253 } else { 3254 WARN_ON(1); 3255 sdev = NULL; 3256 } 3257 } 3258 3259 spin_unlock_irqrestore(ap->lock, flags); 3260 mutex_unlock(&ap->scsi_host->scan_mutex); 3261 3262 if (sdev) { 3263 ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n", 3264 sdev->sdev_gendev.bus_id); 3265 3266 scsi_remove_device(sdev); 3267 scsi_device_put(sdev); 3268 } 3269 } 3270 3271 static void ata_scsi_handle_link_detach(struct ata_link *link) 3272 { 3273 struct ata_port *ap = link->ap; 3274 struct ata_device *dev; 3275 3276 ata_link_for_each_dev(dev, link) { 3277 unsigned long flags; 3278 3279 if (!(dev->flags & ATA_DFLAG_DETACHED)) 3280 continue; 3281 3282 spin_lock_irqsave(ap->lock, flags); 3283 dev->flags &= ~ATA_DFLAG_DETACHED; 3284 spin_unlock_irqrestore(ap->lock, flags); 3285 3286 ata_scsi_remove_dev(dev); 3287 } 3288 } 3289 3290 /** 3291 * ata_scsi_media_change_notify - send media change event 3292 * @dev: Pointer to the disk device with media change event 3293 * 3294 * Tell the block layer to send a media change notification 3295 * event. 3296 * 3297 * LOCKING: 3298 * spin_lock_irqsave(host lock) 3299 */ 3300 void ata_scsi_media_change_notify(struct ata_device *dev) 3301 { 3302 if (dev->sdev) 3303 sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE, 3304 GFP_ATOMIC); 3305 } 3306 3307 /** 3308 * ata_scsi_hotplug - SCSI part of hotplug 3309 * @work: Pointer to ATA port to perform SCSI hotplug on 3310 * 3311 * Perform SCSI part of hotplug. It's executed from a separate 3312 * workqueue after EH completes. This is necessary because SCSI 3313 * hot plugging requires working EH and hot unplugging is 3314 * synchronized with hot plugging with a mutex. 3315 * 3316 * LOCKING: 3317 * Kernel thread context (may sleep). 3318 */ 3319 void ata_scsi_hotplug(struct work_struct *work) 3320 { 3321 struct ata_port *ap = 3322 container_of(work, struct ata_port, hotplug_task.work); 3323 int i; 3324 3325 if (ap->pflags & ATA_PFLAG_UNLOADING) { 3326 DPRINTK("ENTER/EXIT - unloading\n"); 3327 return; 3328 } 3329 3330 DPRINTK("ENTER\n"); 3331 3332 /* Unplug detached devices. We cannot use link iterator here 3333 * because PMP links have to be scanned even if PMP is 3334 * currently not attached. Iterate manually. 
3335 */ 3336 ata_scsi_handle_link_detach(&ap->link); 3337 if (ap->pmp_link) 3338 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) 3339 ata_scsi_handle_link_detach(&ap->pmp_link[i]); 3340 3341 /* scan for new ones */ 3342 ata_scsi_scan_host(ap, 0); 3343 3344 DPRINTK("EXIT\n"); 3345 } 3346 3347 /** 3348 * ata_scsi_user_scan - indication for user-initiated bus scan 3349 * @shost: SCSI host to scan 3350 * @channel: Channel to scan 3351 * @id: ID to scan 3352 * @lun: LUN to scan 3353 * 3354 * This function is called when a user explicitly requests a bus 3355 * scan. It sets the probe pending flag and invokes EH. 3356 * 3357 * LOCKING: 3358 * SCSI layer (we don't care) 3359 * 3360 * RETURNS: 3361 * Zero on success, negative errno otherwise. 3362 */ 3363 static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, 3364 unsigned int id, unsigned int lun) 3365 { 3366 struct ata_port *ap = ata_shost_to_port(shost); 3367 unsigned long flags; 3368 int devno, rc = 0; 3369 3370 if (!ap->ops->error_handler) 3371 return -EOPNOTSUPP; 3372 3373 if (lun != SCAN_WILD_CARD && lun) 3374 return -EINVAL; 3375 3376 if (ap->nr_pmp_links == 0) { 3377 if (channel != SCAN_WILD_CARD && channel) 3378 return -EINVAL; 3379 devno = id; 3380 } else { 3381 if (id != SCAN_WILD_CARD && id) 3382 return -EINVAL; 3383 devno = channel; 3384 } 3385 3386 spin_lock_irqsave(ap->lock, flags); 3387 3388 if (devno == SCAN_WILD_CARD) { 3389 struct ata_link *link; 3390 3391 ata_port_for_each_link(link, ap) { 3392 struct ata_eh_info *ehi = &link->eh_info; 3393 ehi->probe_mask |= (1 << ata_link_max_devices(link)) - 1; 3394 ehi->action |= ATA_EH_SOFTRESET; 3395 } 3396 } else { 3397 struct ata_device *dev = ata_find_dev(ap, devno); 3398 3399 if (dev) { 3400 struct ata_eh_info *ehi = &dev->link->eh_info; 3401 ehi->probe_mask |= 1 << dev->devno; 3402 ehi->action |= ATA_EH_SOFTRESET; 3403 ehi->flags |= ATA_EHI_RESUME_LINK; 3404 } else 3405 rc = -EINVAL; 3406 } 3407 3408 if (rc == 0) { 3409 ata_port_schedule_eh(ap); 3410 spin_unlock_irqrestore(ap->lock, flags); 3411 ata_port_wait_eh(ap); 3412 } else 3413 spin_unlock_irqrestore(ap->lock, flags); 3414 3415 return rc; 3416 } 3417 3418 /** 3419 * ata_scsi_dev_rescan - initiate scsi_rescan_device() 3420 * @work: Pointer to ATA port to perform scsi_rescan_device() 3421 * 3422 * After ATA pass thru (SAT) commands are executed successfully, 3423 * libata needs to propagate the changes to the SCSI layer. This 3424 * function must be executed from ata_aux_wq such that sdev 3425 * attach/detach don't race with rescan. 3426 * 3427 * LOCKING: 3428 * Kernel thread context (may sleep).
3429 */ 3430 void ata_scsi_dev_rescan(struct work_struct *work) 3431 { 3432 struct ata_port *ap = 3433 container_of(work, struct ata_port, scsi_rescan_task); 3434 struct ata_link *link; 3435 struct ata_device *dev; 3436 unsigned long flags; 3437 3438 spin_lock_irqsave(ap->lock, flags); 3439 3440 ata_port_for_each_link(link, ap) { 3441 ata_link_for_each_dev(dev, link) { 3442 struct scsi_device *sdev = dev->sdev; 3443 3444 if (!ata_dev_enabled(dev) || !sdev) 3445 continue; 3446 if (scsi_device_get(sdev)) 3447 continue; 3448 3449 spin_unlock_irqrestore(ap->lock, flags); 3450 scsi_rescan_device(&(sdev->sdev_gendev)); 3451 scsi_device_put(sdev); 3452 spin_lock_irqsave(ap->lock, flags); 3453 } 3454 } 3455 3456 spin_unlock_irqrestore(ap->lock, flags); 3457 } 3458 3459 /** 3460 * ata_sas_port_alloc - Allocate port for a SAS attached SATA device 3461 * @host: ATA host container for all SAS ports 3462 * @port_info: Information from low-level host driver 3463 * @shost: SCSI host that the scsi device is attached to 3464 * 3465 * LOCKING: 3466 * PCI/etc. bus probe sem. 3467 * 3468 * RETURNS: 3469 * ata_port pointer on success / NULL on failure. 3470 */ 3471 3472 struct ata_port *ata_sas_port_alloc(struct ata_host *host, 3473 struct ata_port_info *port_info, 3474 struct Scsi_Host *shost) 3475 { 3476 struct ata_port *ap; 3477 3478 ap = ata_port_alloc(host); 3479 if (!ap) 3480 return NULL; 3481 3482 ap->port_no = 0; 3483 ap->lock = shost->host_lock; 3484 ap->pio_mask = port_info->pio_mask; 3485 ap->mwdma_mask = port_info->mwdma_mask; 3486 ap->udma_mask = port_info->udma_mask; 3487 ap->flags |= port_info->flags; 3488 ap->ops = port_info->port_ops; 3489 ap->cbl = ATA_CBL_SATA; 3490 3491 return ap; 3492 } 3493 EXPORT_SYMBOL_GPL(ata_sas_port_alloc); 3494 3495 /** 3496 * ata_sas_port_start - Set port up for DMA. 3497 * @ap: Port to initialize 3498 * 3499 * Called just after data structures for each port are 3500 * initialized. Allocates DMA pad. 3501 * 3502 * May be used as the port_start() entry in ata_port_operations. 3503 * 3504 * LOCKING: 3505 * Inherited from caller. 3506 */ 3507 int ata_sas_port_start(struct ata_port *ap) 3508 { 3509 return ata_pad_alloc(ap, ap->dev); 3510 } 3511 EXPORT_SYMBOL_GPL(ata_sas_port_start); 3512 3513 /** 3514 * ata_sas_port_stop - Undo ata_sas_port_start() 3515 * @ap: Port to shut down 3516 * 3517 * Frees the DMA pad. 3518 * 3519 * May be used as the port_stop() entry in ata_port_operations. 3520 * 3521 * LOCKING: 3522 * Inherited from caller. 3523 */ 3524 3525 void ata_sas_port_stop(struct ata_port *ap) 3526 { 3527 ata_pad_free(ap, ap->dev); 3528 } 3529 EXPORT_SYMBOL_GPL(ata_sas_port_stop); 3530 3531 /** 3532 * ata_sas_port_init - Initialize a SATA device 3533 * @ap: SATA port to initialize 3534 * 3535 * LOCKING: 3536 * PCI/etc. bus probe sem. 3537 * 3538 * RETURNS: 3539 * Zero on success, non-zero on error.
3540 */ 3541 3542 int ata_sas_port_init(struct ata_port *ap) 3543 { 3544 int rc = ap->ops->port_start(ap); 3545 3546 if (!rc) { 3547 ap->print_id = ata_print_id++; 3548 rc = ata_bus_probe(ap); 3549 } 3550 3551 return rc; 3552 } 3553 EXPORT_SYMBOL_GPL(ata_sas_port_init); 3554 3555 /** 3556 * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc 3557 * @ap: SATA port to destroy 3558 * 3559 */ 3560 3561 void ata_sas_port_destroy(struct ata_port *ap) 3562 { 3563 if (ap->ops->port_stop) 3564 ap->ops->port_stop(ap); 3565 kfree(ap); 3566 } 3567 EXPORT_SYMBOL_GPL(ata_sas_port_destroy); 3568 3569 /** 3570 * ata_sas_slave_configure - Default slave_config routine for libata devices 3571 * @sdev: SCSI device to configure 3572 * @ap: ATA port to which SCSI device is attached 3573 * 3574 * RETURNS: 3575 * Zero. 3576 */ 3577 3578 int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap) 3579 { 3580 ata_scsi_sdev_config(sdev); 3581 ata_scsi_dev_config(sdev, ap->link.device); 3582 return 0; 3583 } 3584 EXPORT_SYMBOL_GPL(ata_sas_slave_configure); 3585 3586 /** 3587 * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device 3588 * @cmd: SCSI command to be sent 3589 * @done: Completion function, called when command is complete 3590 * @ap: ATA port to which the command is being sent 3591 * 3592 * RETURNS: 3593 * Return value from __ata_scsi_queuecmd() if @cmd can be queued, 3594 * 0 otherwise. 3595 */ 3596 3597 int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), 3598 struct ata_port *ap) 3599 { 3600 int rc = 0; 3601 3602 ata_scsi_dump_cdb(ap, cmd); 3603 3604 if (likely(ata_scsi_dev_enabled(ap->link.device))) 3605 rc = __ata_scsi_queuecmd(cmd, done, ap->link.device); 3606 else { 3607 cmd->result = (DID_BAD_TARGET << 16); 3608 done(cmd); 3609 } 3610 return rc; 3611 } 3612 EXPORT_SYMBOL_GPL(ata_sas_queuecmd); 3613
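
/*
 * Illustrative sketch (not part of libata and never built): roughly how a
 * SAS LLD might wire up the ata_sas_*() helpers above for a directly
 * attached SATA device.  The "my_*" names and the struct my_phy layout are
 * hypothetical placeholders; only the ata_sas_*() calls and their
 * signatures come from this file.
 */
#if 0
struct my_phy {
	struct ata_host *ata_host;	/* allocated by the LLD at probe time */
	struct Scsi_Host *shost;	/* the LLD's SCSI host */
	struct ata_port *ap;		/* filled in below */
};

static struct ata_port_info my_sata_port_info;	/* .port_ops would include
						 * ata_sas_port_start/stop */

static int my_probe_sata_phy(struct my_phy *phy)
{
	/* allocate an ata_port bound to the LLD's Scsi_Host ... */
	phy->ap = ata_sas_port_alloc(phy->ata_host, &my_sata_port_info,
				     phy->shost);
	if (!phy->ap)
		return -ENOMEM;

	/* ... then run ->port_start() and probe the attached device */
	return ata_sas_port_init(phy->ap);
}

static int my_slave_configure(struct scsi_device *sdev)
{
	struct my_phy *phy = my_sdev_to_phy(sdev);	/* hypothetical lookup */

	return ata_sas_slave_configure(sdev, phy->ap);
}

static int my_queuecommand(struct scsi_cmnd *cmd,
			   void (*done)(struct scsi_cmnd *))
{
	struct my_phy *phy = my_sdev_to_phy(cmd->device);

	/* translate or simulate the SCSI command on the SATA device */
	return ata_sas_queuecmd(cmd, done, phy->ap);
}

static void my_remove_sata_phy(struct my_phy *phy)
{
	ata_sas_port_destroy(phy->ap);	/* invokes ->port_stop() and frees ap */
}
#endif	/* illustrative sketch */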